def llm_input_value(params: LLMAttributesParams) -> ExtractorResponse:
    """Expose the full LLM request as the span's ``input.value`` attribute.

    Dumps the request model with ``None`` fields omitted and stringifies the
    resulting dict (Python ``repr`` form, matching the original behavior).
    Required by the TLS tracing backend.
    """
    request_dump = params.llm_request.model_dump(exclude_none=True)
    return ExtractorResponse(content=str(request_dump))


def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
    """Expose the full LLM response as the span's ``output.value`` attribute.

    Dumps the response model with ``None`` fields omitted and stringifies the
    resulting dict (Python ``repr`` form, matching the original behavior).
    Required by the TLS tracing backend.
    """
    response_dump = params.llm_response.model_dump(exclude_none=True)
    return ExtractorResponse(content=str(response_dump))