|
| 1 | +# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates. |
| 2 | +# |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +# you may not use this file except in compliance with the License. |
| 5 | +# You may obtain a copy of the License at |
| 6 | +# |
| 7 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +# |
| 9 | +# Unless required by applicable law or agreed to in writing, software |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +# See the License for the specific language governing permissions and |
| 13 | +# limitations under the License. |
| 14 | + |
| 15 | +import math |
| 16 | +import re |
| 17 | +import string |
| 18 | +import sympy |
| 19 | +from typing import Any, cast |
| 20 | +from veadk.agent import Agent |
| 21 | +from veadk.runner import Runner |
| 22 | +from veadk.memory.short_term_memory import ShortTermMemory |
| 23 | +from agentlightning import ( |
| 24 | + LLM, |
| 25 | + LitAgent, |
| 26 | + NamedResources, |
| 27 | + Trainer, |
| 28 | + reward, |
| 29 | +) |
| 30 | + |
| 31 | + |
def normalize_option(option: str) -> str:
    r"""Strip all whitespace and parentheses from a multiple-choice option label.

    The docstring is raw so the ``\n`` in the doctest stays a two-character
    escape sequence; in the original non-raw docstring it became a literal
    newline, which broke the doctest.

    >>> normalize_option(" (A) \n")
    'A'
    """
    # A single character class is equivalent to the original (\s+|\(|\))
    # alternation when the replacement is the empty string.
    return re.sub(r"[\s()]", "", option)
| 38 | + |
| 39 | + |
def is_option_result(result: str) -> bool:
    r"""Return True when *result* is a single ASCII letter option (e.g. "A", "(b)").

    The docstring is raw so the ``\n`` in the doctest stays a two-character
    escape sequence; in the original non-raw docstring it became a literal
    newline, which broke the doctest.

    >>> is_option_result(" A) \n")
    True
    >>> is_option_result(" 23/7 ")
    False
    """
    # Membership in the list of single letters also rejects "" and
    # multi-character strings, unlike a substring test would.
    return normalize_option(result) in list(string.ascii_letters)
| 48 | + |
| 49 | + |
def float_eval(input_str: str) -> float:
    """Numerically evaluate a math-expression string via sympy.

    Strings such as ``"22/7 = around 3.14"`` are truncated at the
    ``" = around "`` marker before parsing, keeping only the expression.
    """
    marker = " = around "
    if marker in input_str:
        input_str, _, _ = input_str.partition(marker)
    parsed = sympy.parse_expr(input_str, evaluate=True)
    return float(parsed.evalf())
| 55 | + |
| 56 | + |
def scalar_are_results_same(pred_result: str, true_result: str, rel_tol: float) -> bool:
    """Compare a predicted answer against the ground truth.

    Three strategies, in order: exact string match (after stripping),
    option-letter comparison when the ground truth is a choice like "(A)",
    and finally numeric comparison within *rel_tol* relative tolerance.
    Returns False when none of them succeeds.
    """
    pred = str(pred_result) if pred_result is not None else ""  # type: ignore
    truth = str(true_result) if true_result is not None else ""  # type: ignore

    # Cheapest check first: literal equality modulo surrounding whitespace.
    if pred.strip() == truth.strip():
        return True

    # Multiple-choice task: compare normalized option letters.
    if is_option_result(truth):
        return normalize_option(pred) == normalize_option(truth)

    # Numeric task: evaluate both sides and compare within tolerance.
    # Parsing can raise for free-form text; treat that as a mismatch.
    try:
        return math.isclose(float_eval(pred), float_eval(truth), rel_tol=rel_tol)
    except Exception:
        return False
| 79 | + |
| 80 | + |
@reward
async def eval(prediction: str, ground_truth: str) -> float:
    """Reward: 1.0 if *prediction* matches *ground_truth* within 1% relative tolerance, else 0.0."""
    # NOTE(review): this shadows the builtin `eval`; callers in this file
    # depend on the name, so it is kept.
    matched = scalar_are_results_same(prediction, ground_truth, 1e-2)
    return 1.0 if matched else 0.0
| 84 | + |
| 85 | + |
class CalcAgent(LitAgent[Any]):
    """Agent-Lightning rollout wrapper around a veadk calculation agent."""

    async def training_rollout_async(
        self, task: Any, rollout_id: str, resources: NamedResources
    ) -> Any:  # type: ignore
        """Run one rollout: ask the LLM the question, extract the answer, score it."""
        llm: LLM = cast(LLM, resources.get("main_llm"))
        agent = Agent(
            name="CalcAgent",
            description="An agent that can perform calculations to answer questions.",
            instruction="You are a helpful assistant that can perform mathematical calculations to answer questions accurately.",
            model_provider="openai",
            model=llm.model,
            api_base=llm.endpoint,
            api_key="",
        )
        runner = Runner(
            agent=agent,
            short_term_memory=ShortTermMemory(),
            app_name="calc_agent",
            user_id="veadk_default_user",
        )
        try:
            output_format = "Output the answer when you are ready. The answer should be surrounded by three sharps (`###`), in the form of ### ANSWER: <answer> ###."
            result = await runner.run(
                session_id=rollout_id,
                messages=task["question"] + " " + output_format,
            )
            # Pull the `### ANSWER: ... ###` span out of the final message;
            # fall back to the whole message when the model ignored the format.
            final_text = result.messages[-1].content  # type: ignore
            match = re.search(r"###\s*ANSWER:\s*(.+?)(\s*###|$)", final_text)
            answer = match.group(1) if match else final_text
        except Exception as e:
            print("Failure:", str(e))
            answer = "None"
        # reward is tracked with the decorator
        reward = await eval(answer, str(task["result"]))  # type: ignore
        print(f"answer: {answer} ground_truth: {task['result']} reward: {reward}")  # type: ignore

    async def validation_rollout_async(
        self, task: Any, rollout_id: str, resources: NamedResources
    ) -> Any:  # type: ignore
        """Validation = a training rollout with greedy (temperature 0) sampling."""
        llm: LLM = cast(LLM, resources.get("main_llm"))
        greedy_resources = {
            "main_llm": LLM(
                endpoint=llm.endpoint,
                model=llm.model,
                sampling_parameters={"temperature": 0},
            )
        }
        return await self.training_rollout_async(task, rollout_id, greedy_resources)
| 145 | + |
| 146 | + |
if __name__ == "__main__":
    # Launch 10 parallel rollout workers against the local training server.
    trainer = Trainer(n_workers=10)
    trainer.fit(CalcAgent(), "http://localhost:9999/")
0 commit comments