From 8c7ee1763e64dffc8f421a775329889cdb9c7fbe Mon Sep 17 00:00:00 2001
From: congxi
Date: Thu, 30 May 2024 09:17:05 +0800
Subject: [PATCH] [feature] Remove redundant code logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Phenomenon and reproduction steps**

**Root cause and solution**

Remove code that is no longer used:

* the resource_app module (image upload/fetch routes) and its blueprint registration in server.py
* the Redis configuration block in configmap.yaml and etc/df-llm-agent.yaml
* the llm_config table, its seed data, and the llm_agent_config add/update/delete endpoints; platform configuration is now read from df-llm-agent.yaml (config.platforms)
* the LangChain "modules" demo endpoint and its worker code
* the bundled MySQL Deployment/Service manifests

Also rename the Kubernetes resources with a -ce suffix, switch the image to ghcr.io/deepflowio/deepflow-ce/deepflowio-stella-agent-ce, and fix the api_secre -> api_secret typo.

**Impactions**

**Test method**

**Affected branch(es)**

* main

**Checklist**

- [ ] Dependencies update required
- [ ] Common bug (similar problem in other repo)

---
 deploy/templates/configmap.yaml         |  22 +--
 deploy/templates/deployment.yaml        |  18 +-
 deploy/templates/deployment_mysql.yaml  |  30 ----
 deploy/templates/service.yaml           |   4 +-
 deploy/templates/service_mysql.yaml     |  17 --
 df-llm-agent/database/init.sql          |  44 +----
 df-llm-agent/llm_agent_app/app.py       |  34 ----
 df-llm-agent/llm_agent_app/llm_agent.py | 117 +-------------
 .../llm_agent_app/llm_agent_config.py   | 154 +-----------------
 df-llm-agent/llm_agent_app/worker.py    |  20 ---
 df-llm-agent/resource_app/__init__.py   |   0
 df-llm-agent/resource_app/app.py        |  33 ----
 df-llm-agent/resource_app/resource.py   | 108 ------------
 df-llm-agent/resource_app/worker.py     |  35 ----
 df-llm-agent/server.py                  |   2 -
 etc/df-llm-agent.yaml                   |  20 +--
 16 files changed, 29 insertions(+), 629 deletions(-)
 delete mode 100644 deploy/templates/deployment_mysql.yaml
 delete mode 100644 deploy/templates/service_mysql.yaml
 delete mode 100644 df-llm-agent/resource_app/__init__.py
 delete mode 100644 df-llm-agent/resource_app/app.py
 delete mode 100644 df-llm-agent/resource_app/resource.py
 delete mode 100644 df-llm-agent/resource_app/worker.py

diff --git a/deploy/templates/configmap.yaml b/deploy/templates/configmap.yaml index a590fed..e7482df 100644 --- a/deploy/templates/configmap.yaml +++ b/deploy/templates/configmap.yaml @@ -2,7 +2,7 @@ kind: ConfigMap apiVersion: v1 metadata: - name: df-llm-agent + name: df-llm-agent-ce namespace: deepflow data: df-llm-agent.yaml: |- @@ -12,14 +12,6 @@ data: log_file: /var/log/df-llm-agent.log log_level: info instance_path: /root/df-llm-agent - - redis: - host: - - redis - cluster_enabled: False # True,False - port: 6379 - db: 6 - password: "password123" mysql: user_name: root user_password: password123 @@ -27,11 +19,11 @@ data: port: 30130 database: deepflow_llm ai: - enable: False # True,False + enable: False platforms: - - enable: False platform: "azure" + enable: False model: "gpt" api_type: "azure" api_key: "" @@ -40,25 +32,25 @@ data: engine_name: - "" - - enable: False platform: "aliyun" + enable: False model: "dashscope" api_key: "" engine_name: - "qwen-turbo" - "qwen-plus" - - enable: False platform: "baidu" + enable: False model: "qianfan" api_key: "" - api_secre: "" + api_secret: "" engine_name: - "ERNIE-Bot" - "ERNIE-Bot-turbo" - - enable: False platform: "zhipu" + enable: False model: "zhipuai" api_key: "" engine_name:
diff --git a/deploy/templates/deployment.yaml b/deploy/templates/deployment.yaml index 6574ff3..28b6862 100644 --- a/deploy/templates/deployment.yaml +++ b/deploy/templates/deployment.yaml @@ -2,40 +2,40 @@ kind: Deployment apiVersion: apps/v1 metadata: - name: df-llm-agent-deployment + name: df-llm-agent-deployment-ce namespace: deepflow labels: - component: df-llm-agent + component: df-llm-agent-ce spec: replicas: 1 selector: matchLabels: - component: df-llm-agent + component: df-llm-agent-ce template: metadata: labels: - component: df-llm-agent + component: df-llm-agent-ce spec: hostNetwork: true dnsPolicy:
ClusterFirstWithHostNet containers: - - name: df-llm-agent - image: "hub.deepflow.yunshan.net/dev/df-llm-agent:latest" + - name: df-llm-agent-ce + image: "ghcr.io/deepflowio/deepflow-ce/deepflowio-stella-agent-ce:latest" imagePullPolicy: Always volumeMounts: - name: debug-path mountPath: /root/debug - mountPath: /etc/web/df-llm-agent.yaml - name: web-volumes-df-llm-agent + name: web-volumes-df-llm-agent-ce subPath: df-llm-agent.yaml volumes: - name: debug-path hostPath: type: DirectoryOrCreate path: /usr/local/deepflow/debug/ - - name: web-volumes-df-llm-agent + - name: web-volumes-df-llm-agent-ce configMap: - name: df-llm-agent + name: df-llm-agent-ce items: - key: df-llm-agent.yaml path: df-llm-agent.yaml diff --git a/deploy/templates/deployment_mysql.yaml b/deploy/templates/deployment_mysql.yaml deleted file mode 100644 index 8feb903..0000000 --- a/deploy/templates/deployment_mysql.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: df-llm-agent-deployment-mysql - namespace: cx-test - labels: - component: df-llm-agent-mysql -spec: - replicas: 1 - selector: - matchLabels: - component: df-llm-agent-mysql - template: - metadata: - labels: - component: df-llm-agent-mysql - spec: - hostNetwork: false - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: df-llm-agent-mysql - image: "mysql:8.0.26" - imagePullPolicy: Always - env: - - name: MYSQL_ROOT_PASSWORD - value: password123 - ports: - - containerPort: 3306 - name: mysql diff --git a/deploy/templates/service.yaml b/deploy/templates/service.yaml index 8086457..860c19e 100644 --- a/deploy/templates/service.yaml +++ b/deploy/templates/service.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: - name: df-llm-agent + name: df-llm-agent-ce namespace: deepflow spec: ports: @@ -13,5 +13,5 @@ spec: protocol: TCP name: http selector: - component: df-llm-agent + component: df-llm-agent-ce type: NodePort diff --git a/deploy/templates/service_mysql.yaml b/deploy/templates/service_mysql.yaml deleted file mode 100644 index 9c98b36..0000000 --- a/deploy/templates/service_mysql.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# df-llm-agent Service -apiVersion: v1 -kind: Service -metadata: - name: df-llm-agent-mysql - namespace: cx-test -spec: - ports: - - port: 30306 - nodePort: 30306 - targetPort: 3306 - protocol: TCP - name: mysql - selector: - component: df-llm-agent-mysql - type: NodePort diff --git a/df-llm-agent/database/init.sql b/df-llm-agent/database/init.sql index 2151501..68398a6 100644 --- a/df-llm-agent/database/init.sql +++ b/df-llm-agent/database/init.sql @@ -50,46 +50,4 @@ CREATE TABLE IF NOT EXISTS score( updated_at DATETIME NOT NULL ON UPDATE CURRENT_TIMESTAMP DEFAULT CURRENT_TIMESTAMP, INDEX `user_id` (`user_id`) )ENGINE=innodb DEFAULT CHARSET=utf8 COMMENT='评分'; -TRUNCATE TABLE score; - - -CREATE TABLE IF NOT EXISTS llm_config( - id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, - user_id INTEGER NOT NULL DEFAULT 0, - `platform` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '平台: azure、aliyun、baidu、tencent', - `model` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '模型统称,例如 azure: openai, 阿里: dashscope, 百度: qianfan, 腾讯: hyllm', - `model_info` VARCHAR(255) NOT NULL DEFAULT '' COMMENT '模型相关具体细节', - `key` VARCHAR(64) NOT NULL DEFAULT '' COMMENT '模型需要的配置项', - `value` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '模型需要的配置项的值', - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at DATETIME NOT NULL ON UPDATE CURRENT_TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - INDEX `user_id` (`user_id`), - UNIQUE 
KEY `unique_engine` (`user_id`,`platform`,`model`,`key`,`value`) -)ENGINE=innodb DEFAULT CHARSET=utf8 COMMENT='llm 配置'; -TRUNCATE TABLE llm_config; - -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'azure','gpt','enable',0); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'azure','gpt','api_key',''); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'azure','gpt','api_type','azure'); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'azure','gpt','api_base',''); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'azure','gpt','api_version',''); --- INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'azure','gpt','engine_name',''); - --- INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'openai','gpt','enable',0); --- INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'openai','gpt','api_key',''); - -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'aliyun','dashscope','enable',0); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'aliyun','dashscope','api_key',''); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'aliyun','dashscope','engine_name','qwen-turbo'); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'aliyun','dashscope','engine_name','qwen-plus'); - - -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'baidu','qianfan','enable',0); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'baidu','qianfan','api_key',''); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'baidu','qianfan','api_secre',''); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'baidu','qianfan','engine_name','ERNIE-Bot'); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'baidu','qianfan','engine_name','ERNIE-Bot-turbo'); - -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'zhipu','zhipuai','enable',0); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'zhipu','zhipuai','api_key',''); -INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'zhipu','zhipuai','engine_name','chatglm_turbo'); +TRUNCATE TABLE score; \ No newline at end of file diff --git a/df-llm-agent/llm_agent_app/app.py b/df-llm-agent/llm_agent_app/app.py index c1ad56b..3660c9e 100644 --- a/df-llm-agent/llm_agent_app/app.py +++ b/df-llm-agent/llm_agent_app/app.py @@ -10,14 +10,6 @@ # 配置必须属于一个用户 -@llm_agent_app.route("/llm_agent_config", methods=["POST"]) -@wrap_resp -async def llm_agent_config_add(request): - worker = app_worker(request) - res = await worker.llm_agent_config_add() - return res - - @llm_agent_app.route("/llm_agent_config/", name="by_platform") @llm_agent_app.route("/llm_agent_config") @wrap_resp @@ -27,22 +19,6 @@ async def llm_agent_config_list(request, platform=""): return res -@llm_agent_app.route("/llm_agent_config//", methods=["PATCH"]) -@wrap_resp -async def llm_agent_config_update(request, platform="", key_name=""): - worker = app_worker(request) - res = await worker.llm_agent_config_update(platform, key_name) - return res - - -@llm_agent_app.route("/llm_agent_config//", methods=["DELETE"]) -@wrap_resp -async def llm_agent_config_delete(request, platform="", 
engine_name=""): - worker = app_worker(request) - res = await worker.llm_agent_config_delete(platform, engine_name) - return res - - # 流返回 @llm_agent_app.route("/ai/stream/", methods=["POST"]) @wrap_resp_stream @@ -51,13 +27,3 @@ async def llm_agent_stream_system(request, platform=""): # 流数据 res = await worker.llm_agent_stream(platform) return res - -# 组件 - - -@llm_agent_app.route("/ai/azure/deepflow/modules", methods=["POST"]) -@wrap_resp -async def llm_agent_module(request): - worker = app_worker(request) - res = await worker.llm_agent_module(platform='azure') - return res diff --git a/df-llm-agent/llm_agent_app/llm_agent.py b/df-llm-agent/llm_agent_app/llm_agent.py index c0e67fa..ce5c358 100644 --- a/df-llm-agent/llm_agent_app/llm_agent.py +++ b/df-llm-agent/llm_agent_app/llm_agent.py @@ -61,7 +61,7 @@ # qianfan需要的配置文件 # { # "api_key": "api_key_xxx", -# "api_secre":"api_key_secre" +# "api_secret":"api_key_secret" # "engine_name": "ERNIE-Bot", # "engine_name": "ERNIE-Bot-turbo", # } @@ -332,7 +332,7 @@ async def assistant_base( self.engine_name = engine_config.get("engine_name") elif platform == "baidu": - for key in ("api_key", "api_secre", "engine_name"): + for key in ("api_key", "api_secret", "engine_name"): if key not in engine_config or engine_config.get(f"{key}", "") == "": raise BadRequestException( "DATA_NOT_FOUND", @@ -340,7 +340,7 @@ async def assistant_base( ) qianfan.AK(engine_config.get("api_key")) - qianfan.SK(engine_config.get("api_secre")) + qianfan.SK(engine_config.get("api_secret")) self.engine_name = engine_config.get("engine_name") elif platform == "zhipu": @@ -750,116 +750,5 @@ async def generate_data(output, output_all): return generate_data(output, output_all) - # 组件 - async def module(self, user_info, platform, engine_name, args, data): - # 校验 - await self.assistant_base(user_info, platform, engine_name, "langchain", args, data - ) - - # 开始时间 - working_start_time = datetime.datetime.now() - - # azure模型 - llm = self.langchain_azure_client - - # 字符串返回 - output_parser = StrOutputParser() - - # 基础模板:问题分类指令模板 - prompt = PromptTemplate.from_template( - """鉴于下面的用户问题,将其分类为“LangChain”、“LLM”或“其他”。不要用超过一个字来回应. 
- - <问题> - {question} - - - 分类:""" - ) - - chain = prompt | llm | output_parser - - # res = chain.invoke({"question": "如何使用llm?"}) - # res = chain.invoke({"question": "如何使用langchain?"}) - # print(res) - - # 子链 - # langchain专家 - langchain_chain = ( - PromptTemplate.from_template( - """您是 langchain 方面的专家。 \ - 回答问题时始终以“正如官方文档中所诉”开头。 \ - 回答以下问题: - - 问题: {question} - 回答:""" - ) - | llm - | output_parser - ) - - # 大模型专家 - llm_chain = ( - PromptTemplate.from_template( - """您是 llm大模型 方面的专家。 \ - 回答问题时始终以“以我所知道的所有模型”开头。 \ - 回答以下问题: - - 问题: {question} - 回答:""" - ) - | llm - | output_parser - ) - - # 默认链 - general_chain = ( - PromptTemplate.from_template( - """回答以下问题: - - 问题: {question} - 回答:""" - ) - | llm - | output_parser - ) - - branch = RunnableBranch( - # 多个子链依次追加 - (lambda x: "llm" in x["topic"].lower(), llm_chain), - (lambda x: "langchain" in x["topic"].lower(), langchain_chain), - # 默认链 - general_chain, - ) - - full_chain = {"topic": chain, "question": lambda x: x["question"]} | branch - - # 问题 - question = self.query[0]["content"] - - try: - # 异步一次性返回 - res = await full_chain.ainvoke({"question": question}) - - # 结束时间 - working_end_time = datetime.datetime.now() - all_time = working_end_time.timestamp() - working_start_time.timestamp() - msg = {} - msg["user_id"] = self.user_info.get("ID", 0) - msg["start_time"] = f"{working_start_time}" - msg["end_time"] = f"{working_end_time}" - msg["all_time"] = all_time - msg["return"] = res - # 记录并返回 - self.output.append(f"{res}") - self.output_all.append(msg) - - return res - except Exception as e: - self.output_all.append(e) - raise BadRequestException("APP_ERROR", const.APP_ERROR, f"{e}") - finally: - # 更新会话记录,包括所有返回可记录数据 - await self.chat_up() - llm_agent_worker = llmAgentWorker() diff --git a/df-llm-agent/llm_agent_app/llm_agent_config.py b/df-llm-agent/llm_agent_app/llm_agent_config.py index 9e5f8af..101d9e3 100644 --- a/df-llm-agent/llm_agent_app/llm_agent_config.py +++ b/df-llm-agent/llm_agent_app/llm_agent_config.py @@ -17,47 +17,6 @@ class llmAgentConfigWorker(object): async def verify_data(data): pass - # 新增 - @classmethod - async def llm_agent_config_add(cls, user_info, args, data): - user_id = user_info.get("ID", 0) - user_type = user_info.get("TYPE", 0) - if user_type != 1: - raise BadRequestException("SERVER_ERROR", f"{const.SERVER_ERROR}, 没有权限,只允许超管") - - data_info = {} - data_info["user_id"] = user_id - data_info["platform"] = data.get("platform", "") - data_info["model"] = data.get("model", "") - data_info["model_info"] = data.get("model_info", "") - data_info["key"] = data.get("key_name", "") - data_info["value"] = data.get("value", "") - - # 其他key必须唯一 - # todoing key 需要给范围 - if data_info["key"] != "engine_name": - - raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 只允许添加模型引擎配置") - # where_info = {} - # where_info = {} - # where_info["user_id"] = user_id - # where_info["platform"] = data_info["platform"] - # where_info["key"] = data_info["key"] - - # res = await db_models.LlmConfig.exists(**where_info) - - # if res: - # raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 该配置项在一个平台下必须唯一") - - try: - await db_models.LlmConfig.create(**data_info) - except Exception as e: - raise BadRequestException("SQL_ERROR", const.SQL_ERROR, f"{e}") - - log.info(f"用户:{user_id}, 添加配置, 数据: {data_info}") - - return True - # 获取所有配置 @classmethod async def llm_agent_config_list(cls, user_info, platform=""): @@ -68,9 +27,9 @@ async def llm_agent_config_list(cls, user_info, platform=""): if platform: 
data_info["platform"] = platform + res = {} if hasattr(config, "platforms"): res_config = config.platforms - res = {} for _info in res_config: __info = {} _platform = _info.get('platform', '') @@ -84,118 +43,7 @@ async def llm_agent_config_list(cls, user_info, platform=""): res[f"{_platform}"] = __info - return res - - try: - if data_info: - sql_res = await db_models.LlmConfig.filter(**data_info).all() - else: - sql_res = await db_models.LlmConfig.all() - except Exception as e: - raise BadRequestException("SQL_ERROR", const.SQL_ERROR, f"{e}") - - res = {} - for v in sql_res: - _config = dict(v) - - _platform = _config.get("platform") - _model = _config.get("model") - _model_info = _config.get("model_info") - - _key = _config.get("key") - _value = _config.get("value") - - # 列表过滤敏感数据,详情不过滤 - if platform == "": - if _key not in ["enable", "engine_name"]: - continue - - _merge_config = {} - _merge_config["model"] = _model - _merge_config["model_info"] = _model_info - - if f"{_key}" == "engine_name": - _merge_config[f"{_key}"] = [_value] - else: - _merge_config[f"{_key}"] = _value - - if _platform not in res: - res[f"{_platform}"] = _merge_config - else: - if f"{_key}" == "engine_name": - if f"{_key}" not in res[f"{_platform}"]: - res[f"{_platform}"][f"{_key}"] = [_value] - else: - res[f"{_platform}"][f"{_key}"] += [_value] - else: - res[f"{_platform}"][f"{_key}"] = _value - return res - # 更新 - @classmethod - async def llm_agent_config_update(cls, user_info, platform, key_name, args, data): - user_id = user_info.get("ID", 0) - user_type = user_info.get("TYPE", 0) - if user_type != 1: - raise BadRequestException("SERVER_ERROR", f"{const.SERVER_ERROR}, 没有权限,只允许超管") - - if not platform or not key_name: - raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 缺失平台名或key") - - # engine可以删除和新增。修改意义不大 - if key_name == "engine_name": - raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 引擎值不支持修改") - - where_info = {} - where_info["user_id"] = user_id - where_info["platform"] = platform - where_info["key"] = key_name - - data_info = {} - data_info["value"] = data.get("value", "") - - try: - res = await db_models.LlmConfig.get(**where_info) - if res: - await db_models.LlmConfig.filter(**where_info).update(**data_info) - else: - raise BadRequestException("DATA_NOT_FOUND", const.DATA_NOT_FOUND) - - except Exception as e: - raise BadRequestException("SQL_ERROR", const.SQL_ERROR, f"{e}") - - log.info(f"用户:{user_id}, 更新配置, 数据: {data_info}") - - return True - - # 删除 - @classmethod - async def llm_agent_config_delete(cls, user_info, platform, engine_name, args, data): - user_id = user_info.get("ID", 0) - user_type = user_info.get("TYPE", 0) - if user_type != 1: - raise BadRequestException("SERVER_ERROR", f"{const.SERVER_ERROR}, 没有权限,只允许超管") - - where_info = {} - where_info["user_id"] = user_id - where_info["platform"] = platform - where_info["key"] = "engine_name" - where_info["value"] = engine_name - - # 其他配置不允许删除,只有引擎可以 - try: - llm_config_exist = await db_models.LlmConfig.filter(**where_info).count() - except Exception as e: - raise BadRequestException("SQL_ERROR", const.SQL_ERROR, f"{e}") - - if llm_config_exist > 0: - await db_models.LlmConfig.filter(**where_info).delete() - else: - raise BadRequestException("DATA_NOT_FOUND", const.DATA_NOT_FOUND) - - log.info(f"用户:{user_id}, 删除配置, 查询数据: {where_info}") - return True - llm_agent_config_worker = llmAgentConfigWorker() diff --git a/df-llm-agent/llm_agent_app/worker.py b/df-llm-agent/llm_agent_app/worker.py index 
0b3715c..c636f91 100644 --- a/df-llm-agent/llm_agent_app/worker.py +++ b/df-llm-agent/llm_agent_app/worker.py @@ -24,22 +24,10 @@ def __init__(self, request): self.data = request.json self.user_info = request.ctx.user - async def llm_agent_config_add(self): - # 校验todoing - return await llm_agent_config_worker.llm_agent_config_add(self.user_info, self.args, self.data) - async def llm_agent_config_list(self, platform=""): return await llm_agent_config_worker.llm_agent_config_list(self.user_info, platform) - async def llm_agent_config_update(self, platform="", key_name=""): - # 校验todoing - return await llm_agent_config_worker.llm_agent_config_update(self.user_info, platform, key_name, self.args, self.data) - - async def llm_agent_config_delete(self, platform="", engine_name=""): - # 校验todoing - return await llm_agent_config_worker.llm_agent_config_delete(self.user_info, platform, engine_name, self.args, self.data) - # 流处理 async def llm_agent_stream(self, platform, prompt_type=''): # 校验todoing @@ -48,11 +36,3 @@ async def llm_agent_stream(self, platform, prompt_type=''): raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 缺失使用的引擎名称") llm_agent_worker = llmAgentWorker(self.request) return await llm_agent_worker.assistant_stream(self.user_info, platform, engine_name, prompt_type, self.args, self.data) - - # 组件 - async def llm_agent_module(self, platform): - engine_name = self.args.get("engine", "") - if not engine_name: - raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 缺失使用的引擎名称") - llm_agent_worker = llmAgentWorker(self.request) - return await llm_agent_worker.module(self.user_info, platform, engine_name, self.args, self.data) diff --git a/df-llm-agent/resource_app/__init__.py b/df-llm-agent/resource_app/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/df-llm-agent/resource_app/app.py b/df-llm-agent/resource_app/app.py deleted file mode 100644 index d089985..0000000 --- a/df-llm-agent/resource_app/app.py +++ /dev/null @@ -1,33 +0,0 @@ -from sanic import Blueprint -from resource_app.worker import app_worker -from utils.response_tools import wrap_resp -from const import API_PREFIX - -resource_app = Blueprint("resource", url_prefix=API_PREFIX) - - -# 获取文件 -@resource_app.route("/img/", name="resource_get") -@wrap_resp -async def img_get(request, hash_name=""): - worker = app_worker(request) - res = await worker.img_get(hash_name) - return res - - -# 提交文件 -@resource_app.route("/imgs", methods=["POST"], name="resource_add") -@wrap_resp -async def img_add(request): - worker = app_worker(request) - res = await worker.img_add() - return res - - -# 提交文件编码 -@resource_app.route("/imgs/b64", methods=["POST"], name="resource_add_b64") -@wrap_resp -async def img_add_b64(request): - worker = app_worker(request) - res = await worker.img_add_b64() - return res diff --git a/df-llm-agent/resource_app/resource.py b/df-llm-agent/resource_app/resource.py deleted file mode 100644 index 6cb5798..0000000 --- a/df-llm-agent/resource_app/resource.py +++ /dev/null @@ -1,108 +0,0 @@ -from tortoise.transactions import atomic, in_transaction -from tortoise.exceptions import BaseORMException, OperationalError -from tortoise.expressions import Q -from tortoise.functions import Coalesce, Count, Length, Lower, Min, Sum, Trim, Upper -from exception import BadRequestException -import const -from database import db_models -from utils import logger -import traceback -import datetime -import json -import os -import base64 -from utils.tools import 
generate_uuid - -from database.cache import cache - -log = logger.getLogger(__name__) - - -class resourceWorker(object): - - # 校验数据 - @staticmethod - async def verify_data(file): - pass - - # 新增 - @classmethod - async def img_add(cls, user_info, args, files): - - allow_type = ['.jpg', '.png'] - - file = files.get('file') - - file_name = file.name - file_extension = os.path.splitext(file_name)[1] - - if file_extension not in allow_type: - raise BadRequestException("FAIL", f"{const.FAIL}: 文件类型格式错误", f"{const.FAIL}: 文件类型格式错误") - - # 文件大小,byte - filesize = len(file.body) - if filesize > 10 * 1024 * 1024: - # 10M - raise BadRequestException("FAIL", f"{const.FAIL}: 文件大小超过最大值", f"{const.FAIL}: 文件大小超过最大值") - - time_now = datetime.datetime.now() - create_time = time_now.strftime(const.DATE_PATTEN) - expire_time = (time_now + datetime.timedelta(days=1)).strftime(const.DATE_PATTEN) - - cache_client = await cache.GetCacheServer() - - # 记录 - lcuuid = generate_uuid() - - file_info = {} - file_info['lcuuid'] = lcuuid - file_info['name'] = file_name - # file_info['size'] = filesize - file_info['create_time'] = create_time - file_info['expire_time'] = expire_time - file_info['img'] = f"data:{file.type};base64,{base64.b64encode(file.body).decode('utf8')}" - try: - await cache_client.hmset(lcuuid, file_info) - await cache_client.expire(lcuuid, 86400) - return await cache_client.hgetall(lcuuid) - except Exception: - raise BadRequestException("FAIL", f"{const.FAIL}: 保存图片失败", f"{const.FAIL}: {traceback.format_exc()}") - - @classmethod - async def img_add_b64(cls, user_info, args, data): - time_now = datetime.datetime.now() - create_time = time_now.strftime(const.DATE_PATTEN) - expire_time = (time_now + datetime.timedelta(days=1)).strftime(const.DATE_PATTEN) - - cache_client = await cache.GetCacheServer() - - # 记录 - lcuuid = generate_uuid() - - file_info = {} - file_info['lcuuid'] = lcuuid - file_info['name'] = data.get('name', lcuuid) - # file_info['size'] = data.get('size') - file_info['create_time'] = create_time - file_info['expire_time'] = expire_time - file_info['img'] = data.get('img') - - try: - await cache_client.hmset(lcuuid, file_info) - await cache_client.expire(lcuuid, 86400) - return await cache_client.hgetall(lcuuid) - except Exception: - raise BadRequestException("FAIL", f"{const.FAIL}: 保存图片失败", f"{const.FAIL}: {traceback.format_exc()}") - - @classmethod - async def img_get(cls, user_info, args, hash_name): - cache_client = await cache.GetCacheServer() - try: - res = await cache_client.hgetall(hash_name) - # image_bytes = base64.b64decode(image_base64) - return res - except Exception: - raise BadRequestException("FAIL", f"{const.FAIL}: 获取图片失败", f"{const.FAIL}: {traceback.format_exc()}") - - -resource_worker = resourceWorker() diff --git a/df-llm-agent/resource_app/worker.py b/df-llm-agent/resource_app/worker.py deleted file mode 100644 index 9625090..0000000 --- a/df-llm-agent/resource_app/worker.py +++ /dev/null @@ -1,35 +0,0 @@ -from exception import BadRequestException -import const -from resource_app.resource import resource_worker -from config import config -from utils.curl_tools import curl_tools -from utils import logger -import json -import time - -log = logger.getLogger(__name__) - - -class app_worker(object): - - def __init__(self, request): - self.request = request - self.args = request.args - if self.args: - for k, v in self.args.items(): - self.args[k] = [i for i in v] - self.user_info = request.ctx.user - - async def img_add(self): - # 校验todoing - files = self.request.files - return 
await resource_worker.img_add(self.user_info, self.args, files) - - async def img_add_b64(self): - # 校验todoing - data = self.request.json - return await resource_worker.img_add_b64(self.user_info, self.args, data) - - async def img_get(self, hash_name=""): - # 校验todoing - return await resource_worker.img_get(self.user_info, self.args, hash_name) diff --git a/df-llm-agent/server.py b/df-llm-agent/server.py index 5a94eff..44a1a50 100644 --- a/df-llm-agent/server.py +++ b/df-llm-agent/server.py @@ -9,7 +9,6 @@ from chat_record_app.app import chat_record_app from llm_agent_app.app import llm_agent_app from health_app.app import health_app -from resource_app.app import resource_app import traceback from utils import logger @@ -22,7 +21,6 @@ app.blueprint(health_app) app.blueprint(llm_agent_app) app.blueprint(chat_record_app) -app.blueprint(resource_app) CORS(app) diff --git a/etc/df-llm-agent.yaml b/etc/df-llm-agent.yaml index 9124d6d..91e6931 100644 --- a/etc/df-llm-agent.yaml +++ b/etc/df-llm-agent.yaml @@ -4,14 +4,6 @@ sql_show: False log_file: /var/log/df-llm-agent.log log_level: info instance_path: /root/df-llm-agent - -redis: - host: - - redis - cluster_enabled: False # True,False - port: 6379 - db: 7 - password: "password123" mysql: user_name: root user_password: password123 @@ -19,11 +11,11 @@ mysql: port: 30130 database: deepflow_llm ai: - enable: False # True,False + enable: False platforms: - - enable: False platform: "azure" + enable: False model: "gpt" api_type: "azure" api_key: "" @@ -32,25 +24,25 @@ ai: engine_name: - "" - - enable: False platform: "aliyun" + enable: False model: "dashscope" api_key: "" engine_name: - "qwen-turbo" - "qwen-plus" - - enable: False platform: "baidu" + enable: False model: "qianfan" api_key: "" - api_secre: "" + api_secret: "" engine_name: - "ERNIE-Bot" - "ERNIE-Bot-turbo" - - enable: False platform: "zhipu" + enable: False model: "zhipuai" api_key: "" engine_name: