diff --git a/newrelic/agent.py b/newrelic/agent.py index 4c0718626..76f02b8e0 100644 --- a/newrelic/agent.py +++ b/newrelic/agent.py @@ -139,6 +139,9 @@ def __asgi_application(*args, **kwargs): from newrelic.api.html_insertion import verify_body_exists as __verify_body_exists from newrelic.api.lambda_handler import LambdaHandlerWrapper as __LambdaHandlerWrapper from newrelic.api.lambda_handler import lambda_handler as __lambda_handler +from newrelic.api.llm_custom_attributes import ( + WithLlmCustomAttributes as __WithLlmCustomAttributes, +) from newrelic.api.message_trace import MessageTrace as __MessageTrace from newrelic.api.message_trace import MessageTraceWrapper as __MessageTraceWrapper from newrelic.api.message_trace import message_trace as __message_trace @@ -156,7 +159,9 @@ def __asgi_application(*args, **kwargs): from newrelic.api.ml_model import ( record_llm_feedback_event as __record_llm_feedback_event, ) -from newrelic.api.ml_model import set_llm_token_count_callback as __set_llm_token_count_callback +from newrelic.api.ml_model import ( + set_llm_token_count_callback as __set_llm_token_count_callback, +) from newrelic.api.ml_model import wrap_mlmodel as __wrap_mlmodel from newrelic.api.profile_trace import ProfileTraceWrapper as __ProfileTraceWrapper from newrelic.api.profile_trace import profile_trace as __profile_trace @@ -251,6 +256,7 @@ def __asgi_application(*args, **kwargs): record_custom_event = __wrap_api_call(__record_custom_event, "record_custom_event") record_log_event = __wrap_api_call(__record_log_event, "record_log_event") record_ml_event = __wrap_api_call(__record_ml_event, "record_ml_event") +WithLlmCustomAttributes = __wrap_api_call(__WithLlmCustomAttributes, "WithLlmCustomAttributes") accept_distributed_trace_payload = __wrap_api_call( __accept_distributed_trace_payload, "accept_distributed_trace_payload" ) diff --git a/newrelic/api/llm_custom_attributes.py b/newrelic/api/llm_custom_attributes.py new file mode 100644 index 000000000..37745ba06 --- /dev/null +++ b/newrelic/api/llm_custom_attributes.py @@ -0,0 +1,47 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from newrelic.api.transaction import current_transaction + +_logger = logging.getLogger(__name__) + + +class WithLlmCustomAttributes(object): + def __init__(self, custom_attr_dict): + transaction = current_transaction() + if not custom_attr_dict or not isinstance(custom_attr_dict, dict): + raise TypeError( + "custom_attr_dict must be a non-empty dictionary. Received type: %s" % type(custom_attr_dict) + ) + + # Add "llm." 
prefix to all keys in attribute dictionary + context_attrs = {k if k.startswith("llm.") else f"llm.{k}": v for k, v in custom_attr_dict.items()} + + self.attr_dict = context_attrs + self.transaction = transaction + + def __enter__(self): + if not self.transaction: + _logger.warning("WithLlmCustomAttributes must be called within the scope of a transaction.") + return self + + self.transaction._llm_context_attrs = self.attr_dict + return self + + def __exit__(self, exc, value, tb): + # Clear out context attributes once we leave the current context + if self.transaction: + del self.transaction._llm_context_attrs diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index f281c9609..dc25d6318 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -787,6 +787,10 @@ def handle_chat_completion_event(transaction, bedrock_attrs): custom_attrs_dict = transaction._custom_params llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + llm_context_attrs = getattr(transaction, "_llm_context_attrs", None) + if llm_context_attrs: + llm_metadata_dict.update(llm_context_attrs) + span_id = bedrock_attrs.get("span_id", None) trace_id = bedrock_attrs.get("trace_id", None) request_id = bedrock_attrs.get("request_id", None) diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py index c8353e74f..2ae76ef2c 100644 --- a/newrelic/hooks/mlmodel_langchain.py +++ b/newrelic/hooks/mlmodel_langchain.py @@ -697,7 +697,7 @@ def _get_run_manager_info(transaction, run_args, instance, completion_id): # metadata and tags are keys in the config parameter. metadata = {} metadata.update((run_args.get("config") or {}).get("metadata") or {}) - # Do not report intenral nr_completion_id in metadata. + # Do not report internal nr_completion_id in metadata. 
metadata = {key: value for key, value in metadata.items() if key != "nr_completion_id"} tags = [] tags.extend((run_args.get("config") or {}).get("tags") or []) @@ -708,6 +708,10 @@ def _get_llm_metadata(transaction): # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events custom_attrs_dict = transaction._custom_params llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + llm_context_attrs = getattr(transaction, "_llm_context_attrs", None) + if llm_context_attrs: + llm_metadata_dict.update(llm_context_attrs) + return llm_metadata_dict diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 96228fd85..b52fbe27c 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -927,8 +927,13 @@ def is_stream(wrapped, args, kwargs): def _get_llm_attributes(transaction): """Returns llm.* custom attributes off of the transaction.""" custom_attrs_dict = transaction._custom_params - llm_metadata = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} - return llm_metadata + llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + + llm_context_attrs = getattr(transaction, "_llm_context_attrs", None) + if llm_context_attrs: + llm_metadata_dict.update(llm_context_attrs) + + return llm_metadata_dict def instrument_openai_api_resources_embedding(module): diff --git a/tests/agent_features/test_llm_custom_attributes.py b/tests/agent_features/test_llm_custom_attributes.py new file mode 100644 index 000000000..1f02c231c --- /dev/null +++ b/tests/agent_features/test_llm_custom_attributes.py @@ -0,0 +1,50 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes +from newrelic.api.transaction import current_transaction + + +@background_task() +def test_llm_custom_attributes(): + transaction = current_transaction() + with WithLlmCustomAttributes({"test": "attr", "test1": "attr1"}): + assert transaction._llm_context_attrs == {"llm.test": "attr", "llm.test1": "attr1"} + + assert not hasattr(transaction, "_llm_context_attrs") + + +@pytest.mark.parametrize("context_attrs", (None, "not-a-dict")) +@background_task() +def test_llm_custom_attributes_no_attrs(context_attrs): + transaction = current_transaction() + + with pytest.raises(TypeError): + with WithLlmCustomAttributes(context_attrs): + pass + + assert not hasattr(transaction, "_llm_context_attrs") + + +@background_task() +def test_llm_custom_attributes_prefixed_attrs(): + transaction = current_transaction() + with WithLlmCustomAttributes({"llm.test": "attr", "test1": "attr1"}): + # Validate API does not prefix attributes that already begin with "llm." 
+ assert transaction._llm_context_attrs == {"llm.test": "attr", "llm.test1": "attr1"} + + assert not hasattr(transaction, "_llm_context_attrs") diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 8cc1fdaa8..be0226e55 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -45,6 +45,7 @@ disabled_ai_monitoring_streaming_settings, events_sans_content, events_sans_llm_metadata, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -58,6 +59,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name from newrelic.hooks.external_botocore import MODEL_EXTRACTORS @@ -161,7 +163,7 @@ def expected_invalid_access_key_error_events(model_id): def test_bedrock_chat_completion_in_txn_with_llm_metadata( set_trace_info, exercise_model, expected_events, expected_metrics ): - @validate_custom_events(expected_events) + @validate_custom_events(events_with_context_attrs(expected_events)) # One summary event, one user message, and one response message from the assistant @validate_custom_event_count(count=3) @validate_transaction_metrics( @@ -180,7 +182,8 @@ def _test(): add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + with WithLlmCustomAttributes({"context": "attr"}): + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) _test() @@ -320,7 +323,7 @@ def _test(): def test_bedrock_chat_completion_error_invalid_model( bedrock_server, set_trace_info, response_streaming, expected_metrics ): - @validate_custom_events(chat_completion_invalid_model_error_events) + @validate_custom_events(events_with_context_attrs(chat_completion_invalid_model_error_events)) @validate_error_trace_attributes( "botocore.errorfactory:ValidationException", exact_attrs={ @@ -350,22 +353,23 @@ def _test(): add_custom_attribute("non_llm_attr", "python-agent") with pytest.raises(_client_error): - if response_streaming: - stream = bedrock_server.invoke_model_with_response_stream( - body=b"{}", - modelId="does-not-exist", - accept="application/json", - contentType="application/json", - ) - for _ in stream: - pass - else: - bedrock_server.invoke_model( - body=b"{}", - modelId="does-not-exist", - accept="application/json", - contentType="application/json", - ) + with WithLlmCustomAttributes({"context": "attr"}): + if response_streaming: + stream = bedrock_server.invoke_model_with_response_stream( + body=b"{}", + modelId="does-not-exist", + accept="application/json", + contentType="application/json", + ) + for _ in stream: + pass + else: + bedrock_server.invoke_model( + body=b"{}", + modelId="does-not-exist", + accept="application/json", + contentType="application/json", + ) _test() diff --git a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py index 5f9b87b82..3bd18764f 100644 --- a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py +++ b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py @@ -19,7 +19,10 @@ ) from 
conftest import BOTOCORE_VERSION # pylint: disable=E0611 from testing_support.fixtures import reset_core_stats_engine, validate_attributes -from testing_support.ml_testing_utils import set_trace_info # noqa: F401 +from testing_support.ml_testing_utils import ( # noqa: F401 + events_with_context_attrs, + set_trace_info, +) from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_transaction_metrics import ( @@ -27,6 +30,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute UNSUPPORTED_LANGCHAIN_MODELS = [ @@ -105,7 +109,7 @@ def test_bedrock_chat_completion_in_txn_with_llm_metadata( expected_metrics, response_streaming, ): - @validate_custom_events(expected_events) + @validate_custom_events(events_with_context_attrs(expected_events)) # One summary event, one user message, and one response message from the assistant @validate_custom_event_count(count=6) @validate_transaction_metrics( @@ -124,6 +128,7 @@ def _test(): add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - exercise_model(prompt="Hi there!") + with WithLlmCustomAttributes({"context": "attr"}): + exercise_model(prompt="Hi there!") _test() diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py index 6d8b2943d..9a372f78d 100644 --- a/tests/mlmodel_langchain/test_chain.py +++ b/tests/mlmodel_langchain/test_chain.py @@ -31,6 +31,7 @@ disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, events_sans_content, + events_with_context_attrs, set_trace_info, ) from testing_support.validators.validate_custom_event import validate_custom_event_count @@ -46,6 +47,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name @@ -690,7 +692,7 @@ @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events_list_response) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events_list_response)) @validate_custom_event_count(count=7) @validate_transaction_metrics( name="test_chain:test_langchain_chain_list_response", @@ -720,10 +722,11 @@ def test_langchain_chain_list_response(set_trace_info, comma_separated_list_outp ] ) chain = chat_prompt | chat_openai_client | comma_separated_list_output_parser - chain.invoke( - {"text": "colors"}, - config={"metadata": {"id": "123"}}, - ) + with WithLlmCustomAttributes({"context": "attr"}): + chain.invoke( + {"text": "colors"}, + config={"metadata": {"id": "123"}}, + ) @pytest.mark.parametrize( @@ -991,7 +994,7 @@ def test_langchain_chain_error_in_openai( ): @reset_core_stats_engine() @validate_transaction_error_event_count(1) - @validate_custom_events(expected_events) + @validate_custom_events(events_with_context_attrs(expected_events)) @validate_custom_event_count(count=6) @validate_transaction_metrics( name="test_chain:test_langchain_chain_error_in_openai.._test", @@ -1012,7 +1015,8 @@ def _test(): runnable = create_function(json_schema, chat_openai_client, prompt_openai_error) with pytest.raises(openai.AuthenticationError): 
- getattr(runnable, call_function)(*call_function_args, **call_function_kwargs) + with WithLlmCustomAttributes({"context": "attr"}): + getattr(runnable, call_function)(*call_function_args, **call_function_kwargs) _test() @@ -1215,7 +1219,7 @@ def test_langchain_chain_ai_monitoring_disabled( @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events_list_response) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events_list_response)) @validate_custom_event_count(count=7) @validate_transaction_metrics( name="test_chain:test_async_langchain_chain_list_response", @@ -1247,15 +1251,15 @@ def test_async_langchain_chain_list_response( ] ) chain = chat_prompt | chat_openai_client | comma_separated_list_output_parser - - loop.run_until_complete( - chain.ainvoke( - {"text": "colors"}, - config={ - "metadata": {"id": "123"}, - }, + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete( + chain.ainvoke( + {"text": "colors"}, + config={ + "metadata": {"id": "123"}, + }, + ) ) - ) @reset_core_stats_engine() @@ -1495,7 +1499,7 @@ def test_async_langchain_chain_error_in_openai( ): @reset_core_stats_engine() @validate_transaction_error_event_count(1) - @validate_custom_events(expected_events) + @validate_custom_events(events_with_context_attrs(expected_events)) @validate_custom_event_count(count=6) @validate_transaction_metrics( name="test_chain:test_async_langchain_chain_error_in_openai.._test", @@ -1516,7 +1520,8 @@ def _test(): runnable = create_function(json_schema, chat_openai_client, prompt_openai_error) with pytest.raises(openai.AuthenticationError): - loop.run_until_complete(getattr(runnable, call_function)(*call_function_args, **call_function_kwargs)) + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete(getattr(runnable, call_function)(*call_function_args, **call_function_kwargs)) _test() @@ -1740,11 +1745,11 @@ def test_multiple_async_langchain_chain( expected_events, loop, ): - call1 = expected_events.copy() + call1 = events_with_context_attrs(expected_events.copy()) call1[0][1]["request_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" call1[1][1]["request_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" call1[2][1]["request_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" - call2 = expected_events.copy() + call2 = events_with_context_attrs(expected_events.copy()) call2[0][1]["request_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" call2[1][1]["request_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" call2[2][1]["request_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" @@ -1781,14 +1786,15 @@ def _test(): add_custom_attribute("non_llm_attr", "python-agent") runnable = create_function(json_schema, chat_openai_client, prompt) - - call1 = asyncio.ensure_future( - getattr(runnable, call_function)(*call_function_args, **call_function_kwargs), loop=loop - ) - call2 = asyncio.ensure_future( - getattr(runnable, call_function)(*call_function_args, **call_function_kwargs), loop=loop - ) - loop.run_until_complete(asyncio.gather(call1, call2)) + with WithLlmCustomAttributes({"context": "attr"}): + + call1 = asyncio.ensure_future( + getattr(runnable, call_function)(*call_function_args, **call_function_kwargs), loop=loop + ) + call2 = asyncio.ensure_future( + getattr(runnable, call_function)(*call_function_args, **call_function_kwargs), loop=loop + ) + loop.run_until_complete(asyncio.gather(call1, call2)) _test() diff --git a/tests/mlmodel_langchain/test_tool.py b/tests/mlmodel_langchain/test_tool.py index 
a153c8200..0714e84aa 100644 --- a/tests/mlmodel_langchain/test_tool.py +++ b/tests/mlmodel_langchain/test_tool.py @@ -25,6 +25,7 @@ from testing_support.ml_testing_utils import ( # noqa: F401 disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, + events_with_context_attrs, set_trace_info, ) from testing_support.validators.validate_custom_event import validate_custom_event_count @@ -40,6 +41,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.common.object_names import callable_name @@ -93,7 +95,7 @@ def events_sans_content(event): @reset_core_stats_engine() -@validate_custom_events(single_arg_tool_recorded_events) +@validate_custom_events(events_with_context_attrs(single_arg_tool_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_single_arg_tool", @@ -108,7 +110,8 @@ def events_sans_content(event): @background_task() def test_langchain_single_arg_tool(set_trace_info, single_arg_tool): set_trace_info() - single_arg_tool.run({"query": "Python Agent"}) + with WithLlmCustomAttributes({"context": "attr"}): + single_arg_tool.run({"query": "Python Agent"}) @reset_core_stats_engine() @@ -132,7 +135,7 @@ def test_langchain_single_arg_tool_no_content(set_trace_info, single_arg_tool): @reset_core_stats_engine() -@validate_custom_events(single_arg_tool_recorded_events) +@validate_custom_events(events_with_context_attrs(single_arg_tool_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_single_arg_tool_async", @@ -147,7 +150,8 @@ def test_langchain_single_arg_tool_no_content(set_trace_info, single_arg_tool): @background_task() def test_langchain_single_arg_tool_async(set_trace_info, single_arg_tool, loop): set_trace_info() - loop.run_until_complete(single_arg_tool.arun({"query": "Python Agent"})) + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete(single_arg_tool.arun({"query": "Python Agent"})) @reset_core_stats_engine() @@ -276,7 +280,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop): "user": {}, }, ) -@validate_custom_events(multi_arg_error_recorded_events) +@validate_custom_events(events_with_context_attrs(multi_arg_error_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_error_in_run", @@ -292,9 +296,10 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool): with pytest.raises(pydantic_core._pydantic_core.ValidationError): set_trace_info() # Only one argument is provided while the tool expects two to create an error - multi_arg_tool.run( - {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} - ) + with WithLlmCustomAttributes({"context": "attr"}): + multi_arg_tool.run( + {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) @reset_core_stats_engine() @@ -339,7 +344,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): "user": {}, }, ) -@validate_custom_events(multi_arg_error_recorded_events) +@validate_custom_events(events_with_context_attrs(multi_arg_error_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_error_in_run_async", @@ -353,13 +358,14 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): 
@background_task() def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): with pytest.raises(pydantic_core._pydantic_core.ValidationError): - set_trace_info() - # Only one argument is provided while the tool expects two to create an error - loop.run_until_complete( - multi_arg_tool.arun( - {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + with WithLlmCustomAttributes({"context": "attr"}): + set_trace_info() + # Only one argument is provided while the tool expects two to create an error + loop.run_until_complete( + multi_arg_tool.arun( + {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) ) - ) @reset_core_stats_engine() diff --git a/tests/mlmodel_langchain/test_vectorstore.py b/tests/mlmodel_langchain/test_vectorstore.py index d406277f2..4a9188e74 100644 --- a/tests/mlmodel_langchain/test_vectorstore.py +++ b/tests/mlmodel_langchain/test_vectorstore.py @@ -23,6 +23,7 @@ from testing_support.ml_testing_utils import ( # noqa: F401 disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, + events_with_context_attrs, set_trace_info, ) from testing_support.validators.validate_custom_event import validate_custom_event_count @@ -35,6 +36,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name @@ -123,7 +125,7 @@ def test_vectorstore_modules_instrumented(): @reset_core_stats_engine() -@validate_custom_events(vectorstore_recorded_events) +@validate_custom_events(events_with_context_attrs(vectorstore_recorded_events)) # Two OpenAI LlmEmbedded, two LangChain LlmVectorSearch @validate_custom_event_count(count=4) @validate_transaction_metrics( @@ -143,13 +145,14 @@ def test_pdf_pagesplitter_vectorstore_in_txn(set_trace_info, embedding_openai_cl add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - script_dir = os.path.dirname(__file__) - loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) - docs = loader.load() + with WithLlmCustomAttributes({"context": "attr"}): + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() - faiss_index = FAISS.from_documents(docs, embedding_openai_client) - docs = faiss_index.similarity_search("Complete this sentence: Hello", k=1) - assert "Hello world" in docs[0].page_content + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + docs = faiss_index.similarity_search("Complete this sentence: Hello", k=1) + assert "Hello world" in docs[0].page_content @reset_core_stats_engine() @@ -214,7 +217,7 @@ def test_pdf_pagesplitter_vectorstore_ai_monitoring_disabled(set_trace_info, emb @reset_core_stats_engine() -@validate_custom_events(vectorstore_recorded_events) +@validate_custom_events(events_with_context_attrs(vectorstore_recorded_events)) # Two OpenAI LlmEmbedded, two LangChain LlmVectorSearch @validate_custom_event_count(count=4) @validate_transaction_metrics( @@ -235,13 +238,14 @@ async def _test(): add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - script_dir = os.path.dirname(__file__) - loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) - docs = loader.load() + with WithLlmCustomAttributes({"context": "attr"}): + script_dir = os.path.dirname(__file__) + 
loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() - faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) - docs = await faiss_index.asimilarity_search("Complete this sentence: Hello", k=1) - return docs + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search("Complete this sentence: Hello", k=1) + return docs docs = loop.run_until_complete(_test()) assert "Hello world" in docs[0].page_content @@ -341,7 +345,7 @@ async def _test(): callable_name(AssertionError), required_params={"user": ["vector_store_id"], "intrinsic": [], "agent": []}, ) -@validate_custom_events(vectorstore_error_events) +@validate_custom_events(events_with_context_attrs(vectorstore_error_events)) @validate_transaction_metrics( name="test_vectorstore:test_vectorstore_error", scoped_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], @@ -354,13 +358,14 @@ async def _test(): @background_task() def test_vectorstore_error(set_trace_info, embedding_openai_client, loop): with pytest.raises(AssertionError): - set_trace_info() - script_dir = os.path.dirname(__file__) - loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) - docs = loader.load() + with WithLlmCustomAttributes({"context": "attr"}): + set_trace_info() + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() - faiss_index = FAISS.from_documents(docs, embedding_openai_client) - faiss_index.similarity_search(query="Complete this sentence: Hello", k=-1) + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + faiss_index.similarity_search(query="Complete this sentence: Hello", k=-1) @reset_core_stats_engine() @@ -396,7 +401,7 @@ def test_vectorstore_error_no_content(set_trace_info, embedding_openai_client): callable_name(AssertionError), required_params={"user": ["vector_store_id"], "intrinsic": [], "agent": []}, ) -@validate_custom_events(vectorstore_error_events) +@validate_custom_events(events_with_context_attrs(vectorstore_error_events)) @validate_transaction_metrics( name="test_vectorstore:test_async_vectorstore_error", scoped_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], @@ -420,7 +425,8 @@ async def _test(): return docs with pytest.raises(AssertionError): - loop.run_until_complete(_test()) + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete(_test()) @reset_core_stats_engine() diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py index cbeb9cdd0..e7985013d 100644 --- a/tests/mlmodel_openai/test_chat_completion.py +++ b/tests/mlmodel_openai/test_chat_completion.py @@ -25,6 +25,7 @@ disabled_ai_monitoring_streaming_settings, events_sans_content, events_sans_llm_metadata, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -35,6 +36,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute _test_openai_chat_completion_messages = ( @@ -42,6 +44,7 @@ {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, ) + chat_completion_recorded_events = [ ( {"type": "LlmChatCompletionSummary"}, @@ -130,7 +133,7 @@ @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) # One 
summary event, one system message, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( @@ -147,10 +150,10 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info): add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - - openai.ChatCompletion.create( - model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 - ) + with WithLlmCustomAttributes({"context": "attr"}): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) @reset_core_stats_engine() @@ -300,7 +303,7 @@ def test_openai_chat_completion_async_stream_monitoring_disabled(loop, set_trace @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) @validate_custom_event_count(count=4) @validate_transaction_metrics( "test_chat_completion:test_openai_chat_completion_async_with_llm_metadata", @@ -319,11 +322,12 @@ def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info): add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - loop.run_until_complete( - openai.ChatCompletion.acreate( - model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) ) - ) @reset_core_stats_engine() diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py index d3ed79bea..fab701d1c 100644 --- a/tests/mlmodel_openai/test_chat_completion_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -24,6 +24,7 @@ add_token_count_to_events, disabled_ai_monitoring_record_content_settings, events_sans_content, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -38,6 +39,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name @@ -120,19 +122,20 @@ rollup_metrics=[("Llm/completion/OpenAI/create", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model(set_trace_info): with pytest.raises(openai.InvalidRequestError): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") - openai.ChatCompletion.create( - # no model provided, - messages=_test_openai_chat_completion_messages, - temperature=0.7, - max_tokens=100, - ) + with WithLlmCustomAttributes({"context": "attr"}): + openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + ) @dt_enabled diff --git a/tests/mlmodel_openai/test_chat_completion_error_v1.py b/tests/mlmodel_openai/test_chat_completion_error_v1.py index 18c2bb7da..32147691d 100644 --- 
a/tests/mlmodel_openai/test_chat_completion_error_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_error_v1.py @@ -23,6 +23,7 @@ add_token_count_to_events, disabled_ai_monitoring_record_content_settings, events_sans_content, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -37,6 +38,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name @@ -116,16 +118,17 @@ rollup_metrics=[("Llm/completion/OpenAI/create", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model(set_trace_info, sync_openai_client): with pytest.raises(TypeError): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") - sync_openai_client.chat.completions.create( - messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 - ) + with WithLlmCustomAttributes({"context": "attr"}): + sync_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) @reset_core_stats_engine() @@ -182,18 +185,20 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf rollup_metrics=[("Llm/completion/OpenAI/create", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info, async_openai_client): with pytest.raises(TypeError): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") - loop.run_until_complete( - async_openai_client.chat.completions.create( - messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete( + async_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) ) - ) @reset_core_stats_engine() diff --git a/tests/mlmodel_openai/test_chat_completion_stream.py b/tests/mlmodel_openai/test_chat_completion_stream.py index 32420c78f..fce533350 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream.py +++ b/tests/mlmodel_openai/test_chat_completion_stream.py @@ -25,6 +25,7 @@ disabled_ai_monitoring_streaming_settings, events_sans_content, events_sans_llm_metadata, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -35,6 +36,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute disabled_custom_insights_settings = {"custom_insights_events.enabled": False} @@ -132,7 +134,7 @@ @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) # One summary event, one system message, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( @@ -150,15 +152,16 @@ def 
test_openai_chat_completion_sync_with_llm_metadata(set_trace_info): add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - generator = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=_test_openai_chat_completion_messages, - temperature=0.7, - max_tokens=100, - stream=True, - ) - for resp in generator: - assert resp + with WithLlmCustomAttributes({"context": "attr"}): + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp @reset_core_stats_engine() @@ -323,7 +326,7 @@ async def consumer(): @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) @validate_custom_event_count(count=4) @validate_transaction_metrics( "test_chat_completion_stream:test_openai_chat_completion_async_with_llm_metadata", @@ -353,7 +356,8 @@ async def consumer(): async for resp in generator: assert resp - loop.run_until_complete(consumer()) + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete(consumer()) @reset_core_stats_engine() diff --git a/tests/mlmodel_openai/test_chat_completion_stream_error.py b/tests/mlmodel_openai/test_chat_completion_stream_error.py index 2dc0400b7..56135c5d0 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_error.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_error.py @@ -24,6 +24,7 @@ add_token_count_to_events, disabled_ai_monitoring_record_content_settings, events_sans_content, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -38,6 +39,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name @@ -119,22 +121,23 @@ rollup_metrics=[("Llm/completion/OpenAI/create", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model(set_trace_info): with pytest.raises(openai.InvalidRequestError): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") - generator = openai.ChatCompletion.create( - # no model provided, - messages=_test_openai_chat_completion_messages, - temperature=0.7, - max_tokens=100, - stream=True, - ) - for resp in generator: - assert resp + with WithLlmCustomAttributes({"context": "attr"}): + generator = openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp @dt_enabled @@ -489,22 +492,23 @@ def test_chat_completion_wrong_api_key_error(monkeypatch, set_trace_info): rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info): with pytest.raises(openai.InvalidRequestError): - set_trace_info() - 
add_custom_attribute("llm.conversation_id", "my-awesome-id") - loop.run_until_complete( - openai.ChatCompletion.acreate( - # no model provided, - messages=_test_openai_chat_completion_messages, - temperature=0.7, - max_tokens=100, - stream=True, + with WithLlmCustomAttributes({"context": "attr"}): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + openai.ChatCompletion.acreate( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) ) - ) @dt_enabled diff --git a/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py b/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py index 33bda04f7..0c4978c97 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py @@ -24,6 +24,7 @@ add_token_count_to_events, disabled_ai_monitoring_record_content_settings, events_sans_content, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -38,6 +39,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name @@ -117,18 +119,19 @@ rollup_metrics=[("Llm/completion/OpenAI/create", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model(set_trace_info, sync_openai_client): with pytest.raises(TypeError): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") - generator = sync_openai_client.chat.completions.create( - messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True - ) - for resp in generator: - assert resp + with WithLlmCustomAttributes({"context": "attr"}): + generator = sync_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True + ) + for resp in generator: + assert resp @dt_enabled @@ -188,22 +191,23 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf rollup_metrics=[("Llm/completion/OpenAI/create", 1)], background_task=True, ) -@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) @validate_custom_event_count(count=3) @background_task() def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info, async_openai_client): with pytest.raises(TypeError): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") + with WithLlmCustomAttributes({"context": "attr"}): - async def consumer(): - generator = await async_openai_client.chat.completions.create( - messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True - ) - async for resp in generator: - assert resp + async def consumer(): + generator = await async_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True + ) + async for resp in generator: + assert resp - loop.run_until_complete(consumer()) + loop.run_until_complete(consumer()) @dt_enabled diff --git 
a/tests/mlmodel_openai/test_chat_completion_stream_v1.py b/tests/mlmodel_openai/test_chat_completion_stream_v1.py index c94cbef55..5e60fd888 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_v1.py @@ -27,6 +27,7 @@ disabled_ai_monitoring_streaming_settings, events_sans_content, events_sans_llm_metadata, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -37,6 +38,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute # TODO: Once instrumentation support is added for `.with_streaming_response.` @@ -142,7 +144,7 @@ @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) # One summary event, one system message, one user message, and one response message from the assistant # @validate_custom_event_count(count=4) @validate_transaction_metrics( @@ -160,16 +162,17 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_open add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - generator = sync_openai_client.chat.completions.create( - model="gpt-3.5-turbo", - messages=_test_openai_chat_completion_messages, - temperature=0.7, - max_tokens=100, - stream=True, - ) + with WithLlmCustomAttributes({"context": "attr"}): + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) - for resp in generator: - assert resp + for resp in generator: + assert resp @SKIP_IF_NO_OPENAI_WITH_STREAMING_RESPONSE @@ -471,7 +474,7 @@ async def consumer(): @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) @validate_custom_event_count(count=4) @validate_transaction_metrics( "test_chat_completion_stream_v1:test_openai_chat_completion_async_with_llm_metadata", @@ -501,7 +504,8 @@ async def consumer(): async for resp in generator: assert resp - loop.run_until_complete(consumer()) + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete(consumer()) @SKIP_IF_NO_OPENAI_WITH_STREAMING_RESPONSE diff --git a/tests/mlmodel_openai/test_chat_completion_v1.py b/tests/mlmodel_openai/test_chat_completion_v1.py index cbf631d55..dededb840 100644 --- a/tests/mlmodel_openai/test_chat_completion_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_v1.py @@ -25,6 +25,7 @@ disabled_ai_monitoring_streaming_settings, events_sans_content, events_sans_llm_metadata, + events_with_context_attrs, llm_token_count_callback, set_trace_info, ) @@ -35,6 +36,7 @@ ) from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes from newrelic.api.transaction import add_custom_attribute _test_openai_chat_completion_messages = ( @@ -130,7 +132,7 @@ @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) # One summary event, one system message, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( @@ -147,10 +149,10 @@ def 
test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_open add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - - sync_openai_client.chat.completions.create( - model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 - ) + with WithLlmCustomAttributes({"context": "attr"}): + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) @reset_core_stats_engine() @@ -324,7 +326,7 @@ def test_openai_chat_completion_async_stream_monitoring_disabled(loop, set_trace @reset_core_stats_engine() -@validate_custom_events(chat_completion_recorded_events) +@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events)) @validate_custom_event_count(count=4) @validate_transaction_metrics( "test_chat_completion_v1:test_openai_chat_completion_async_with_llm_metadata", @@ -343,11 +345,12 @@ def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info, as add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - loop.run_until_complete( - async_openai_client.chat.completions.create( - model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + with WithLlmCustomAttributes({"context": "attr"}): + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) ) - ) @reset_core_stats_engine() diff --git a/tests/testing_support/ml_testing_utils.py b/tests/testing_support/ml_testing_utils.py index a9a74af17..9d6923f95 100644 --- a/tests/testing_support/ml_testing_utils.py +++ b/tests/testing_support/ml_testing_utils.py @@ -54,6 +54,13 @@ def events_sans_llm_metadata(expected_events): return events +def events_with_context_attrs(expected_events): + events = copy.deepcopy(expected_events) + for event in events: + event[1]["llm.context"] = "attr" + return events + + @pytest.fixture(scope="session") def set_trace_info(): def _set_trace_info():
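

Usage sketch (not part of the patch itself): the patch exports `WithLlmCustomAttributes` through `newrelic.agent`, so application code can scope context attributes to a block of LLM calls roughly as below. The `call_llm` function and its body are hypothetical placeholders for any instrumented LLM call (e.g. an OpenAI chat completion or a Bedrock `invoke_model`):

    import newrelic.agent

    def call_llm(prompt):
        # Hypothetical placeholder for any instrumented LLM call
        # (e.g. an OpenAI chat completion or a Bedrock invoke_model).
        pass

    @newrelic.agent.background_task()
    def handle_request():
        # Keys without the "llm." prefix are prefixed automatically; the resulting
        # attributes are attached to every LLM event recorded inside this block.
        with newrelic.agent.WithLlmCustomAttributes({"user_segment": "beta", "llm.session_id": "abc-123"}):
            call_llm("What is 212 degrees Fahrenheit converted to Celsius?")
        # On __exit__ the attributes are removed from the transaction, so LLM
        # calls made after the block are not tagged.

Per the new module, using the context manager outside an active transaction logs a warning and records nothing, and passing anything other than a non-empty dict raises a TypeError.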