From 27b1453628125033d82b6bd1ce8187dc6a684f78 Mon Sep 17 00:00:00 2001 From: huxinjia Date: Wed, 27 Nov 2024 18:14:19 +0800 Subject: [PATCH 1/3] fix bugs --- mxAgent/READM.md | 64 +++++++++++-------- mxAgent/agent_sdk/llms/openai_compatible.py | 10 ++- mxAgent/agent_sdk/toolmngt/api.py | 4 +- mxAgent/samples/tools/common.py | 1 - mxAgent/samples/tools/duck_search.py | 31 +-------- mxAgent/samples/tools/web_summary_api.py | 4 +- .../samples/travel_agent_demo/travelagent.py | 4 +- 7 files changed, 51 insertions(+), 67 deletions(-) diff --git a/mxAgent/READM.md b/mxAgent/READM.md index 9a8b15323..bfda6889e 100644 --- a/mxAgent/READM.md +++ b/mxAgent/READM.md @@ -4,6 +4,12 @@ mxAgent是一个基于LLMs的通用Agent框架,应用多种框架解决不同 提供一套Agent实现框架,让用户可以通过框架搭建自己的Agent应用 ### 1.Router Agent 提供意图识别的能力,用户可预设意图的分类,通过Router Agent给出具体问题的分类结果,用于设别不同的问题场景。 +使用示例: +``` +cd mxAgent +export PYTHONPATH=. +python samples/basic_demo/intent_router.py --model_name xxx --base_url xxx --api_key xxxx +``` ### 2.Recipe Agent 设置复杂问题执行的workflow,在解决具体问题时,将workflow翻译成有向无环图的的节点编排,通过并行的方式执行节点。 适用于有相对固定workflow的复杂问题场景。 @@ -17,7 +23,7 @@ Recipe Agent利用用户所提供的流程指导和工具,使用LLMs生成SOP ``` cd mxAgent export PYTHONPATH=. -python samplestravel_agenttravelagent.py +python samples/travel_agent_demo/travelagent.py --model_name xxx --base_url xxx --api_key xxxx ``` ### 3.ReAct Agent @@ -26,6 +32,11 @@ python samplestravel_agenttravelagent.py 2)执行工具调用,得到工具执行结果 3)将工具执行结果应用于下一次的模型思考 4)循环上述过程,直到模型认为问题得到解决 +``` +cd mxAgent +export PYTHONPATH=. +python samples/basic_demo/agent_test.py --model_name xxx --base_url xxx --api_key xxxx +``` ### 4.Single Action Agent 通过模型反思、调用工具执行,总结工具结果的执行轨迹,完成一次复杂问题的处理。Single Action Agent使用一次工具调用帮助完成复杂问题解决 @@ -33,7 +44,7 @@ python samplestravel_agenttravelagent.py ``` cd mxAgent export PYTHONPATH=. 
-python samplestraj_generate_test.py +python samples/basic_demo/traj_generate_test.py --model_name xxx --base_url xxx --api_key xxxx ``` ## @@ -41,37 +52,38 @@ python samplestraj_generate_test.py ## 二、接口使用方法 ### 1.模型使用 1.1. get_llm_backend -参数含义 - ---- ----- -backend推理模型后台类型,当前取值 -base_urlOpenAI客户端推理模型地址 -api_keyOpenAI客户端api key -llm_name推理模型名称 + +| 参数 | 含义 | +| ---- | ----- | +| backend | 推理模型后台类型,当前取值 +| base_url | OpenAI客户端推理模型地址 +| api_key |OpenAI客户端api key +| llm_name | 推理模型名称 1.2. run -参数含义取值 - ---- ----- --- -prompt模型输入的prompt字符串或数组 -ismessage是否为对话信息是否为对话信息,默认值Falsebool值 +| 参数 | 含义 | 取值 +| ---- | ----- | ---| +| prompt |模型输入的prompt | 字符串或数组 +| ismessage | 是否为对话信息是否为对话信息,默认值False | bool值 更多模型后处理参数可参考transformers文档 ### 2.agent接口 2.1 初始化参数 -参数含义取值类型 - ---- ----- --- - llm 模型客户端 OpenAICompatibleLLM - prompt agent提示词 字符串 - tool_list 工具列表 列表 - max_steps agent执行最大步数 int - max_token_number 模型生成最大token数 int - max_context_len 模型最大上下文token数 int - max_retries 最多重启次数 int - example agent推理生成示例 字符串 - reflect_prompt agent反思提示 字符串 - recipe recipe agent参考的标准工作流 字符串 - intents 分类的意图及含义 dict - final_prompt 最终生成总结的提示 字符串 +| 参数| 含义 | 取值类型| +| ---- | -----| --- | +| llm |模型客户端 |OpenAICompatibleLLM +| prompt |agent提示词 |字符串 +| tool_list | 工具列表 | 列表 +| max_steps | agent执行最大步数 | int +| max_token_number | 模型生成最大token数 | int +| max_context_len | 模型最大上下文token数 | int +| max_retries | 最多重启次数 | int +| example | agent推理生成示例 | 字符串 +| reflect_prompt | agent反思提示 | 字符串 +| recipe | recipe agent参考的标准工作流 | 字符串 +| intents | 分类的意图及含义 | dict +| final_prompt | 最终生成总结的提示 | 字符串 diff --git a/mxAgent/agent_sdk/llms/openai_compatible.py b/mxAgent/agent_sdk/llms/openai_compatible.py index fd51e13e6..9d819e604 100644 --- a/mxAgent/agent_sdk/llms/openai_compatible.py +++ b/mxAgent/agent_sdk/llms/openai_compatible.py @@ -3,8 +3,6 @@ from typing import List, Dict, Optional -from langchain_openai import OpenAI -from loguru import logger from openai import OpenAI @@ -16,10 +14,10 @@ class OpenAICompatibleLLM: 
self.client = OpenAI(api_key=api_key, base_url=base_url) def run(self, prompt, ismessage=False, **kwargs): - temperature = kwargs.get("temperature", 0.1) - stop = kwargs.get("stop", None) - max_tokens = kwargs.get("max_tokens", 4096) - stream = kwargs.get("stream", False) + temperature = kwargs.pop("temperature", 0.1) + stop = kwargs.pop("stop", None) + max_tokens = kwargs.pop("max_tokens", 4096) + stream = kwargs.pop("stream", False) messages = prompt if ismessage else [{"role": "user", "content": prompt}] if stream: diff --git a/mxAgent/agent_sdk/toolmngt/api.py b/mxAgent/agent_sdk/toolmngt/api.py index c270ee7c1..ea0c624fd 100644 --- a/mxAgent/agent_sdk/toolmngt/api.py +++ b/mxAgent/agent_sdk/toolmngt/api.py @@ -3,7 +3,7 @@ from abc import ABC from dataclasses import dataclass, field from typing import Union, Tuple -from agent_sdk.utils.constant import THOUGHT, ACTION, OBSERVATION, ACTION_INPUT +from agent_sdk.common.constant import THOUGHT, ACTION, OBSERVATION, ACTION_INPUT from loguru import logger @@ -76,7 +76,7 @@ class API(ABC): JSON objects.') def check_api_call_correctness(self, response, groundtruth) -> bool: - raise NotImplementedError + return response.exception is None def call(self, input_parameter: dict, **kwargs): raise NotImplementedError diff --git a/mxAgent/samples/tools/common.py b/mxAgent/samples/tools/common.py index 8fea8f41b..2ed383491 100644 --- a/mxAgent/samples/tools/common.py +++ b/mxAgent/samples/tools/common.py @@ -13,7 +13,6 @@ def get_website_summary(keys, prompt, llm): if isinstance(val, list): it = flatten(val) filtered.append(it) - filtered.append(val) if len(filtered) == 0: raise Exception("keywords has no been found") diff --git a/mxAgent/samples/tools/duck_search.py b/mxAgent/samples/tools/duck_search.py index eb0b7291d..7da42455e 100644 --- a/mxAgent/samples/tools/duck_search.py +++ b/mxAgent/samples/tools/duck_search.py @@ -3,8 +3,7 @@ from typing import List import re from langchain_community.tools import
DuckDuckGoSearchResults -from langchain_community.utilities import DuckDuckGoSearchAPIWrapper -from utils.log import LOGGER as logger +from loguru import logger from toolmngt.api import API @@ -51,35 +50,11 @@ class DuckDuckGoSearch(API): return self.make_response(input_parameter, results="", exception=exception) -def format_result(res): - snippet_idx = res.find("snippet:") - title_idx = res.find("title:") - link_idx = res.find("link:") - snippet = res[snippet_idx + len("snippet:"):title_idx] - title = res[title_idx + len("title:"):link_idx] - link = res[link_idx + len("link:"):] - return {"snippet": snippet.replace("", "").replace("", ""), "title": title, "url": link} - - def call_duck_duck_go_search(query: str, count: int) -> List[str]: try: logger.debug(f"search DuckDuckGo({query}, {count})") - duck_duck_search = DuckDuckGoSearchAPIWrapper(max_results=count) - search = DuckDuckGoSearchResults(api_wrapper=duck_duck_search) - bingsearch_results = [] - temp = search.run(query) - logger.debug(temp) - - snippets = re.findall(r'\[(.*?)\]', temp) - snippets = [snippet.strip() for snippet in snippets] - - for snippet in snippets: - if len(snippet) == 0: - continue - logger.debug(f"snippet is {snippet}") - bingsearch_results.append(format_result(snippet)) - logger.success(f"{json.dumps(bingsearch_results, indent=4)}") + search = DuckDuckGoSearchResults(output_format="list", max_results=count) + return search.invoke(query) except Exception as e: logger.error(e) return [] - return bingsearch_results diff --git a/mxAgent/samples/tools/web_summary_api.py b/mxAgent/samples/tools/web_summary_api.py index dfb38dd37..d7bcce9fb 100644 --- a/mxAgent/samples/tools/web_summary_api.py +++ b/mxAgent/samples/tools/web_summary_api.py @@ -103,7 +103,7 @@ class WebSummary: @classmethod def summary_call(cls, web, max_summary_number, summary_prompt): title = web.get("title", "") - url = web.get("url") + url = web.get("link") snippet = web.get("snippet", "") web_summary = {} if url is None: 
@@ -112,7 +112,7 @@ class WebSummary: web_summary['title'] = title web_summary['url'] = url try: - content = asyncio.run(cls.get_details(url, summary_prompt)) + content, err = asyncio.run(cls.get_details(url, summary_prompt)) except Exception as e: logger.error(e) if not isinstance(content, str) or len(content) == 0: diff --git a/mxAgent/samples/travel_agent_demo/travelagent.py b/mxAgent/samples/travel_agent_demo/travelagent.py index 01f04d057..06edca9dc 100644 --- a/mxAgent/samples/travel_agent_demo/travelagent.py +++ b/mxAgent/samples/travel_agent_demo/travelagent.py @@ -177,11 +177,11 @@ if __name__ == "__main__": llm_name = args.pop("model_name") llm = get_llm_backend(backend=BACKEND_OPENAI_COMPATIBLE, - api_base=base_url, api_key=api_key, llm_name=llm_name).run + base_url=base_url, api_key=api_key, llm_name=llm_name).run query = "帮我制定一份从北京到上海6天的旅游计划" travel_agent = TravelAgent() - res = travel_agent.run(query, stream=True) + res = travel_agent.run(query, stream=False) if isinstance(res, AgentRunResult): logger.info("-----------run agent success-------------") logger.info(res.answer) -- Gitee From a48e4553fd14939633daf957bb222dee5d8d2604 Mon Sep 17 00:00:00 2001 From: huxinjia Date: Wed, 27 Nov 2024 18:15:55 +0800 Subject: [PATCH 2/3] add requirement --- mxAgent/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mxAgent/requirements.txt b/mxAgent/requirements.txt index c96356599..701bfc1bf 100644 --- a/mxAgent/requirements.txt +++ b/mxAgent/requirements.txt @@ -14,6 +14,6 @@ rouge langchain_openai colorlog rouge-score -langchain-community +langchain-community==0.3.8 loguru tiktoken \ No newline at end of file -- Gitee From 8b61afec2aef70f3376da6e483bae4705f9a959f Mon Sep 17 00:00:00 2001 From: huxinjia Date: Wed, 27 Nov 2024 18:30:26 +0800 Subject: [PATCH 3/3] fix llm module --- mxAgent/agent_sdk/llms/openai_compatible.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mxAgent/agent_sdk/llms/openai_compatible.py
b/mxAgent/agent_sdk/llms/openai_compatible.py index 9d819e604..3ab79cea9 100644 --- a/mxAgent/agent_sdk/llms/openai_compatible.py +++ b/mxAgent/agent_sdk/llms/openai_compatible.py @@ -1,7 +1,3 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved. - - from typing import List, Dict, Optional from openai import OpenAI -- Gitee