execute_state.start_node_id = operations[next(iter(operations))] if operations else None
logger.debug(f'{action.name} start: {mommt.strftime("%Y-%m-%d %H:%M:%S")}') @@ -177,7 +200,7 @@ class AgentExecutor(): for future in as_completed(thread_list): with self.lock: self.update_history(future.result(), executor_state) - return executor_state.workspace.get_last_result() + return self.get_leaves_result(executor_state) # 而此处的写法不关注next,关注dependency def run(self, content): @@ -238,11 +261,12 @@ class AgentExecutor(): execute_state = ExecutorState() workspace = WorkSpace(operation_history=[], variable_space={}) - graph = ActionGraph(operations) execute_state.workspace = workspace - execute_state.sop_graph = graph - execute_state.remaining_tasks = {k for k, _ in graph.actions.items()} + execute_state.sop_graph = operations + execute_state.start_node_id = operations[next(iter(operations))] + execute_state.remaining_tasks = {k for k, _ in operations.items()} + execute_state.leaves_tasks = self.get_leaves_node(operations) return execute_state diff --git a/mxAgent/agent_sdk/executor/recipe_executor/parser.py b/mxAgent/agent_sdk/executor/recipe_executor/parser.py index ee4ac08950c6884952af772c3e012ee96d9a2b76..40a19999c7896df31c8680ccaefe65e4fdb6adb5 100644 --- a/mxAgent/agent_sdk/executor/recipe_executor/parser.py +++ b/mxAgent/agent_sdk/executor/recipe_executor/parser.py @@ -9,6 +9,7 @@ from loguru import logger class Node: def __init__(self, name, + description, content, prompt, step, @@ -22,6 +23,7 @@ class Node: ): self.step = step self.name = name + self.description = description self.input = content self.prompt = prompt self.activate = activate @@ -39,7 +41,8 @@ class Parser: def construct_graph(nodes): graph = OrderedDict() for operation in nodes: - operation = Node(name=operation['name'], + node = Node(name=operation['name'], + description=operation["description"], step=operation['step'], content=operation['input'], prompt=operation['prompt'], @@ -48,7 +51,7 @@ class Parser: output=operation["output"], strategy=operation["strategy"], 
activate=operation["activate"]) - graph[operation.name] = operation + graph[node.name] = node return graph def parse(self, raw_dict): @@ -57,7 +60,7 @@ class Parser: node = { "step": None, "name": None, - "goal": None, + "description": None, "input": None, # 输入 "prompt": None, # llm的prompt "activate": None, # 执行引擎,表达式验证规则 @@ -104,14 +107,3 @@ class Parser: else: raise TypeError("Invalid inputs type") return output - - - - -class ActionGraph: - def __init__(self, actions) -> None: - self.actions = actions - if len(actions) == 0: - self.start_node_id = 0 - else: - self.start_node_id = actions[next(iter(actions))] diff --git a/mxAgent/agent_sdk/executor/recipe_executor/state.py b/mxAgent/agent_sdk/executor/recipe_executor/state.py index eeae064e7416539197ea7c9426ca4d925fbf77ca..d0e916507375155c8618c5ddc39da3c67b294c09 100644 --- a/mxAgent/agent_sdk/executor/recipe_executor/state.py +++ b/mxAgent/agent_sdk/executor/recipe_executor/state.py @@ -48,6 +48,8 @@ class ExecutorState: self.activated_tasks = set() self.done_tasks = set() self.activate_actions = [] + self.leaves_tasks = [] # 图中所有的叶子节点 + self.start_node_id = None # 开始节点 # for sop planning type self.init_query = "" @@ -73,7 +75,7 @@ class WorkSpace: operation_history, variable_space): self.operation_history = operation_history - self.variable_space = variable_space + self.variable_space = variable_space # 记录执行结果,workspace[node_name]的值正常是一个json,由[out_param]指定某一个具体的参数值 self.last_operation = "" def update(self, history): diff --git a/mxAgent/samples/tools/tool_query_accommodations.py b/mxAgent/samples/tools/tool_query_accommodations.py index 8c1edfdb1ddbe9f85515ce40218c67f998b8211d..5326b35371f92149bc2e952b5f023047e197e608 100644 --- a/mxAgent/samples/tools/tool_query_accommodations.py +++ b/mxAgent/samples/tools/tool_query_accommodations.py @@ -50,16 +50,17 @@ class QueryAccommodations(API): logger.debug(f"search accommodation key words: {','.join(keys)}") prompt = 
"""你是一个擅长文字处理和信息总结的智能助手,你的任务是将提供的网页信息进行总结,并以精简的文本的形式进行返回, - 请添加适当的词语,使得语句内容连贯,通顺。提供的信息是为用户推荐的酒店的网页数据, - 请总结网页信息,要求从以下几个方面考虑: - 1. 酒店的地理位置,星级、评分,评价,品牌信息 - 2. 不同的户型对应的价格、房间情况,对入住用户的要求等 - 并给出一到两个例子介绍这些情况 - 若输入的内容没有包含有效的酒店和住宿信息,请统一返回:【无】 - 下面是网页的输入: - {input} - 请生成总结: - """ +请添加适当的词语,使得语句内容连贯,通顺。提供的信息是为用户推荐的酒店的网页数据, +请总结网页信息,要求从以下几个方面考虑: +1. 酒店的地理位置,星级、评分,评价,品牌信息 +2. 不同的户型对应的价格、房间情况,对入住用户的要求等 +并给出一到两个例子介绍这些情况。 +必须注意下面这个要求: +若输入的内容没有包含相关的酒店和住宿信息,请统一返回:【无】 +下面是网页的输入: +{input} +请生成总结: +""" try: filtered = filter_website_keywords(keys) filtered.append("住宿") diff --git a/mxAgent/samples/tools/tool_query_attractions.py b/mxAgent/samples/tools/tool_query_attractions.py index b590d9542b9e9b8243f867c1ef3bf5040f4a2c16..ff16f98a0c72bda0a3cbbea56e6e49ef35d3fb79 100644 --- a/mxAgent/samples/tools/tool_query_attractions.py +++ b/mxAgent/samples/tools/tool_query_attractions.py @@ -54,19 +54,20 @@ class QueryAttractions(API): keys = [destination, scene, scene_type, requirement] summary_prompt = """你是一个擅长于网页信息总结的智能助手,提供的网页是关于旅游规划的信息,现在已经从网页中获取到了相关的文字内容信息,你需要从网页中找到与**景区**介绍相关的内容,并进行提取, - 你务必保证提取的内容都来自所提供的文本,保证结果的客观性,真实性。 - 网页中可能包含多个景点的介绍,你需要以YAML文件的格式返回,每个景点的返回的参数和格式如下: - **输出格式**: - - name: xx - introduction: xx - **参数介绍**: - name:景点名称 - introduction:精简的景区介绍,可以从以下这些方面阐述:景点的基本情况、历史文化等信息、景区门票信息、景区开放时间、景区的联系方式、预约方式以及链接,景区对游客的要求等。 - **注意** - 请注意:不要添加任何解释或注释,且严格遵循YAML格式 - 下面是提供的网页文本信息: - {input} - 请开始生成: +你务必保证提取的内容都来自所提供的文本,保证结果的客观性,真实性。 +网页中可能包含多个景点的介绍,你需要以YAML文件的格式返回,每个景点的返回的参数和格式如下: +**输出格式**: +- name: xx +introduction: xx +**参数介绍**: +name:景点名称 +introduction:精简的景区介绍,可以从以下这些方面阐述:景点的基本情况、历史文化等信息、景区门票信息、景区开放时间、景区的联系方式、预约方式以及链接,景区对游客的要求等。 +**注意** +1. 请注意:不要添加任何解释或注释,且严格遵循YAML格式 +2. 
若输入的内容没有包含旅游和景点相关信息,请统一返回:【无】 +下面是提供的网页文本信息: +{input} +请开始生成: """ try: filtered = filter_website_keywords(keys) diff --git a/mxAgent/samples/tools/tool_query_transports.py b/mxAgent/samples/tools/tool_query_transports.py index bf048f822da60f492865b2b1ab9f4bf08a60e5a9..8a6a383ebb29a08dc626c76262efd90fff81ca7a 100644 --- a/mxAgent/samples/tools/tool_query_transports.py +++ b/mxAgent/samples/tools/tool_query_transports.py @@ -55,13 +55,15 @@ class QueryTransports(API): keys = [prefix, req, travel_mode] prompt = """你的任务是将提供的网页信息进行总结,并以精简的文本的形式进行返回, - 请添加适当的词语,使得语句内容连贯,通顺。输入是为用户查询的航班、高铁等交通数据,请将这些信息总结 - 请总结网页信息,要求从以下几个方面考虑: - 总结出航班或者高铁的价格区间、需要时长区间、并给出2-3例子,介绍车次、时间、时长、价格等 - 下面是网页的输入: - {input} - 请生成总结: - """ +请添加适当的词语,使得语句内容连贯,通顺。输入是为用户查询的航班、高铁等交通数据,请将这些信息总结 +请总结网页信息,要求从以下几个方面考虑: +总结出航班或者高铁的价格区间、需要时长区间、并给出2-3例子,介绍车次、时间、时长、价格等 +请特别注意: +若输入的内容没有包含相关的交通信息,请统一返回:【无】 +下面是网页的输入: +{input} +请生成总结: +""" filtered = filter_website_keywords(keys) webs = WebSummary.web_summary( filtered, search_num=3, summary_num=3, summary_prompt=prompt, llm=llm) diff --git a/mxAgent/samples/tools/tool_query_weather.py b/mxAgent/samples/tools/tool_query_weather.py index 2df61a1db40f6892cf1693bebc07f7fb1fa37876..03d877a289f42960d24b9eb3152666a2cb79dcc6 100644 --- a/mxAgent/samples/tools/tool_query_weather.py +++ b/mxAgent/samples/tools/tool_query_weather.py @@ -88,7 +88,7 @@ class QueryWeather(API): # 精简输入 key_keeps = [ 'day_weather', 'day_wind_direction', 'day_wind_power', - 'max_degree', 'min_degree', 'night_weather', 'night_wind_direction', 'night_wind_power' + 'max_degree', 'min_degree', 'night_weather' ] summary_copy = [] for key, info in weekly_weather.items(): @@ -98,7 +98,7 @@ class QueryWeather(API): info_keeps = {k: info[k] for k in key_keeps if k in info} daily[time] = info_keeps summary_copy.append(daily) - return summary_copy + return summary_copy[:5] def format_request_param(self, data, weather_type): for key, value in data.items(): @@ -164,7 +164,7 @@ class QueryWeather(API): 
weather_summary = summary_copy[gaps + 1:] if len(weather_summary) == 0: - weather_summary = "**抱歉,我最多只能查询最近7天的天气情况,例如下面是我将为你提供最近的天气预报**:\n" + \ + weather_summary = "**抱歉,我最多只能查询最近5天的天气情况,例如下面是我将为你提供最近的天气预报**:\n" + \ json.dumps(summary_copy, ensure_ascii=False, indent=4) res = { 'forecast': weather_summary diff --git a/mxAgent/samples/tools/web_summary_api.py b/mxAgent/samples/tools/web_summary_api.py index c4e040dd38c077f069422405e9be68f378e6cb0d..a91c69f5697fc2e69e2cf7b3a077f371a0828b77 100644 --- a/mxAgent/samples/tools/web_summary_api.py +++ b/mxAgent/samples/tools/web_summary_api.py @@ -91,6 +91,8 @@ class WebSummary: if 'PleaseenableJSanddisableanyadblocker' in text: text = "" except Exception as e: + if len(str(e)) == 0: + e = Exception(f"error type: {type(e)}, when request the url: {url}") logger.error(e) return '', e if len(text) == 0: @@ -115,7 +117,7 @@ class WebSummary: content, err = asyncio.run(cls.get_details(url, summary_prompt)) except Exception as e: logger.error(e) - if not isinstance(content, str) or len(content) == 0: + if not isinstance(content, str) or len(content) == 0 or content == "【无】": web_summary['snippet'] = snippet else: web_summary['content'] = content @@ -130,7 +132,7 @@ class WebSummary: mommt = datetime.now(tz=timezone.utc) logger.debug(f"start duck duck go search: {mommt.strftime('%Y-%m-%d %H:%M:%S')}") if isinstance(keys, list): - keys = ",".join(keys) + keys = " ".join(keys) search_result = call_duck_duck_go_search(keys, search_num) mommt = datetime.now(tz=timezone.utc) logger.debug(f"finish duck duck go search: {mommt.strftime('%Y-%m-%d %H:%M:%S')}")