From 7106cef998f79690d237315d77b8d2082a12c02e Mon Sep 17 00:00:00 2001
From: "wuqingfu.528"
Date: Fri, 29 Aug 2025 17:50:28 +0800
Subject: [PATCH] fix(tracing): modify the index of the part on APMPlus

---
 .../attributes/extractors/llm_attributes_extractors.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py b/veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py
index 87d87920..e854447b 100644
--- a/veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py
+++ b/veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py
@@ -103,10 +103,11 @@ def llm_gen_ai_usage_cache_read_input_tokens(
 def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
     # a part is a message
     messages: list[dict] = []
+    idx = 0
 
     for content in params.llm_request.contents:
         if content.parts:
-            for idx, part in enumerate(content.parts):
+            for part in content.parts:
                 message = {}
                 # text part
                 if part.text:
@@ -141,6 +142,7 @@ def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
 
             if message:
                 messages.append(message)
+            idx += 1
 
     return ExtractorResponse(content=messages)