Compare commits

...

2 Commits

  1. 256
      api_llm_generate/api_llm_generate.py

@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
import time
import httpx
import json
def fetch_llm_generate_task(odoo_base_url="http://localhost:8069"):
    """
    Fetch one pending LLM generation task from the Odoo endpoint.

    Args:
        odoo_base_url: Base URL of the Odoo server
            (default ``http://localhost:8069``).

    Returns:
        dict or None: The task payload on success, containing:
            - id: record ID
            - final_prompt: final prompt text
            - system_prompt: system prompt text
            - user_prompt: user prompt text
        ``None`` when there is no pending task, the response shape is
        unexpected, or the request failed.
    """
    url = f"{odoo_base_url}/api/local-llm-generate/fetch"
    headers = {
        "Content-Type": "application/json"
    }
    payload = {}
    try:
        print(f"[INFO] 正在请求 Odoo 接口: {url}")
        response = httpx.post(
            url,
            headers=headers,
            json=payload,
            timeout=30
        )
        response.raise_for_status()
        data = response.json()
        # {"data": null} means the queue is empty. Honor the documented
        # contract and return None instead of calling exit(1): the caller's
        # `if not task_data: break` is the intended shutdown path, and
        # exiting here would also skip any cleanup the caller performs.
        if isinstance(data, dict) and 'data' in data and data['data'] is None:
            print("[INFO] 当前没有待生成的 LLM 任务")
            return None
        # A well-formed task dict carries at least an 'id' field
        # (plus final_prompt / system_prompt / user_prompt).
        if isinstance(data, dict) and 'id' in data:
            print(f"[SUCCESS] 获取到任务数据, record_id={data['id']}")
            return data
        print(f"[WARNING] 返回数据格式不符合预期: {data}")
        return None
    except httpx.TimeoutException:
        print("[ERROR] 请求超时")
        return None
    except httpx.ConnectError as e:
        print(f"[ERROR] 连接错误: {e}")
        return None
    except httpx.HTTPStatusError as e:
        print(f"[ERROR] HTTP 错误: {e}")
        return None
    except Exception as e:
        # Last-resort guard so one bad poll never kills the worker loop.
        print(f"[ERROR] 发生未知错误: {e}")
        return None
def call_llm_generate(task_data, max_retries=3):
    """
    Generate content via the local Ollama OpenAI-compatible chat endpoint.

    Only ``system_prompt`` and ``final_prompt`` from *task_data* are sent;
    ``user_prompt`` is present in the payload but not used by this call.

    Args:
        task_data: Dict with ``final_prompt``, ``system_prompt`` and ``id``
            (``id`` is only used for logging).
        max_retries: Maximum number of attempts (default 3).

    Returns:
        str: The generated text, or the sentinel string ``"failed"`` when
        every attempt fails or *task_data* is ``None``.
    """
    if task_data is None:
        print("[ERROR] task_data 为 None,无法调用 LLM")
        return "failed"
    final_prompt = task_data.get('final_prompt', '')
    system_prompt = task_data.get('system_prompt', '')
    record_id = task_data.get('id', 'unknown')
    print(f"[INFO] 开始为 record_id={record_id} 调用本地 Ollama")
    # OpenAI-compatible chat-completions endpoint exposed by Ollama.
    ollama_url = "http://localhost:11434/v1/chat/completions"
    # Build the messages list, skipping empty prompts.
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if final_prompt:
        messages.append({"role": "user", "content": final_prompt})
    payload = {
        "model": "kimi-k2.5:cloud",
        "messages": messages,
        "stream": False
    }
    # Retry loop: any exception or empty/malformed response counts as a
    # failed attempt; only a non-empty content string returns early.
    for attempt in range(1, max_retries + 1):
        try:
            print(f"[INFO] 第 {attempt} 次尝试调用 Ollama...")
            response = httpx.post(
                ollama_url,
                json=payload,
                timeout=600  # generation can be slow; allow up to 10 minutes
            )
            response.raise_for_status()
            result = response.json()
            # OpenAI-style response: choices[0].message.content
            if "choices" in result and len(result["choices"]) > 0:
                generated_content = result["choices"][0].get("message", {}).get("content", "")
                if generated_content:
                    print(f"[SUCCESS] 第 {attempt} 次尝试成功,生成长度: {len(generated_content)} 字符")
                    return generated_content
                else:
                    print(f"[WARNING] 第 {attempt} 次尝试返回空内容")
            else:
                print(f"[WARNING] 第 {attempt} 次尝试返回格式异常: {result}")
        except httpx.TimeoutException:
            print(f"[ERROR] 第 {attempt} 次尝试超时")
        except httpx.ConnectError as e:
            print(f"[ERROR] 第 {attempt} 次尝试连接错误: {e}")
        except httpx.HTTPStatusError as e:
            print(f"[ERROR] 第 {attempt} 次尝试 HTTP 错误: {e}")
        except Exception as e:
            print(f"[ERROR] 第 {attempt} 次尝试发生未知错误: {e}")
        # Back off briefly between attempts (time is imported at module top;
        # the original re-imported it inside the loop).
        if attempt < max_retries:
            print("[INFO] 等待 2 秒后重试...")
            time.sleep(2)
    print(f"[FAILED] 经过 {max_retries} 次尝试后仍然失败")
    return "failed"
def upload_llm_result(odoo_base_url, upload_data):
    """
    Post an LLM generation result back to Odoo.

    Args:
        odoo_base_url: Base URL of the Odoo server.
        upload_data: Dict with:
            - record_id: int, record ID
            - status: str, "success" or "failed"
            - result: str, generated content on success
            - error_message: str, error detail on failure

    Returns:
        bool: ``True`` when the endpoint acknowledges success,
        ``False`` otherwise.
    """
    endpoint = f"{odoo_base_url}/api/alpha-idea/result"
    request_headers = {
        "Content-Type": "application/json"
    }
    try:
        print(f"[INFO] 正在上传结果到 Odoo: {endpoint}")
        print(f"[INFO] 上传数据: {json.dumps(upload_data, ensure_ascii=False)}")
        resp = httpx.post(
            endpoint,
            headers=request_headers,
            json=upload_data,
            timeout=30
        )
        resp.raise_for_status()
        body = resp.json()
        print(f"[INFO] 上传结果接口返回: {json.dumps(body, ensure_ascii=False)}")
        # Accept either acknowledgement shape: {"status": "success"}
        # or {"success": true}.
        acknowledged = isinstance(body, dict) and (
            body.get('status') == 'success' or body.get('success') is True
        )
        if acknowledged:
            print(f"[SUCCESS] 结果上传成功")
            return True
        print(f"[WARNING] 结果上传可能失败: {body}")
        return False
    except httpx.TimeoutException:
        print("[ERROR] 上传结果超时")
    except httpx.ConnectError as e:
        print(f"[ERROR] 上传结果连接错误: {e}")
    except httpx.HTTPStatusError as e:
        print(f"[ERROR] 上传结果 HTTP 错误: {e}")
    except Exception as e:
        print(f"[ERROR] 上传结果发生未知错误: {e}")
    return False
if __name__ == "__main__":
    # Odoo server to poll for work.
    ODOO_BASE_URL = "https://quantify.erhe.top"
    print("开始获取 LLM 生成任务...")
    while True:
        # Pull the next pending task; stop the worker when none remain.
        task_data = fetch_llm_generate_task(odoo_base_url=ODOO_BASE_URL)
        if not task_data:
            break
        llm_generate_result = call_llm_generate(task_data)
        print(llm_generate_result)
        # Build the upload payload according to the outcome: "failed" is the
        # sentinel returned by call_llm_generate on exhaustion of retries.
        if llm_generate_result == "failed":
            print("开始上传 LLM 生成结果失败...")
            upload_data = {
                'record_id': task_data['id'],
                'status': 'failed',
                'result': '',
                'error_message': 'generate failed'
            }
        else:
            print("开始上传 LLM 生成结果...")
            upload_data = {
                'record_id': task_data['id'],
                'status': 'success',
                'result': llm_generate_result
            }
        # Report back to Odoo, then pause before polling again.
        if upload_llm_result(ODOO_BASE_URL, upload_data):
            print("[SUCCESS] 整个流程完成")
        else:
            print("[FAILED] 结果上传失败")
        time.sleep(60)
Loading…
Cancel
Save