|
| 1 | +# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates. |
| 2 | +# |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +# you may not use this file except in compliance with the License. |
| 5 | +# You may obtain a copy of the License at |
| 6 | +# |
| 7 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +# |
| 9 | +# Unless required by applicable law or agreed to in writing, software |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +# See the License for the specific language governing permissions and |
| 13 | +# limitations under the License. |
| 14 | + |
| 15 | +import json |
| 16 | +from typing import AsyncGenerator |
| 17 | + |
| 18 | +from google.adk.agents.invocation_context import InvocationContext |
| 19 | +from google.adk.events import Event |
| 20 | +from google.adk.models.llm_request import LlmRequest |
| 21 | +from google.adk.models.llm_response import LlmResponse |
| 22 | +from google.genai.types import Content, Part |
| 23 | + |
| 24 | +from veadk import Agent |
| 25 | +from veadk.agents.supervise_agent import generate_advice |
| 26 | +from veadk.flows.supervise_single_flow import SupervisorSingleFlow |
| 27 | +from veadk.utils.logger import get_logger |
| 28 | + |
| 29 | +logger = get_logger(__name__) |
| 30 | + |
| 31 | + |
class SupervisorAutoFlow(SupervisorSingleFlow):
    """Flow that automatically injects supervisor advice into each LLM call.

    Before every model invocation, the pending request is shown to the
    supervisor agent (via ``generate_advice``). If the supervisor returns
    non-empty advice, it is appended to the request contents as a
    user-role message so the supervised agent can act on it; the actual
    model call is then delegated to the parent flow.
    """

    def __init__(self, supervised_agent: Agent):
        super().__init__(supervised_agent)

    async def _call_llm_async(
        self,
        invocation_context: InvocationContext,
        llm_request: LlmRequest,
        model_response_event: Event,
    ) -> AsyncGenerator[LlmResponse, None]:
        """Consult the supervisor, optionally amend the request, then delegate.

        Args:
            invocation_context: Context of the current agent invocation.
            llm_request: Outgoing request; mutated in place when supervisor
                advice is appended to its ``contents``.
            model_response_event: Event associated with the model response.

        Yields:
            LlmResponse objects produced by the parent flow's LLM call.
        """
        supervisor_response = await generate_advice(self._supervisor, llm_request)
        logger.debug(f"Advice from supervisor: {supervisor_response}")

        # Supervision is advisory: a malformed supervisor reply must not
        # abort the underlying LLM call, so parse defensively instead of
        # letting json.JSONDecodeError propagate.
        try:
            advice_and_reason = json.loads(supervisor_response)
        except (json.JSONDecodeError, TypeError):
            logger.warning(
                "Supervisor response is not valid JSON; skip adding advice."
            )
            advice_and_reason = {}

        # Use .get() so a reply missing either key degrades to "no advice"
        # rather than raising KeyError.
        advice = advice_and_reason.get("advice")
        reason = advice_and_reason.get("reason")

        if advice:
            logger.debug("Add supervisor advice to llm request.")
            llm_request.contents.append(
                Content(
                    parts=[
                        Part(
                            text=f"""Message from your supervisor (not user): {advice}, the corresponding reason is {reason}

                Please follow the advice and reason above to optimize your actions.
                """
                        )
                    ],
                    role="user",
                )
            )
        else:
            logger.info(
                f"Supervisor advice is empty, reason: {reason}. Skip adding to llm request."
            )

        # Delegate the actual model invocation to the parent flow, streaming
        # its responses through unchanged.
        async for llm_response in super()._call_llm_async(
            invocation_context, llm_request, model_response_event
        ):
            yield llm_response
0 commit comments