jack 1 month ago
parent cee0d6fdae
commit d91dce986e
  1. 103
      rpc_alpha_workflow/02_generate_alpha_idea.py
  2. 17
      rpc_alpha_workflow/03_decode_template.py
  3. 404
      rpc_alpha_workflow/migration_plan.md
  4. 630
      rpc_alpha_workflow/reference_02.py

@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
"""Workflow driver for alpha.idea records.

Finds today's records in status 'generated_prompt', posts each one to the
LLM microservice via the Odoo ``post_to_ms`` button, polls until the status
becomes 'llm_received' (10-minute timeout per record), then triggers
``decode_template``. Runs single-threaded over Odoo XML-RPC.

NOTE(review): the scraped diff had merged old/new columns; this is the
reconstructed new version. Credentials are hard-coded — move to env/config.
"""
import time
from datetime import datetime
from xmlrpc.client import ServerProxy, Fault

is_local = 0
if is_local:
    url = 'http://127.0.0.1:28888'
else:
    url = 'http://192.168.31.41:32000'
username = 'rpc'
password = 'aaaAAA111'
db = "quantify"

common = ServerProxy(f"{url}/xmlrpc/2/common")
uid = common.authenticate(db, username, password, {})
models = ServerProxy(f"{url}/xmlrpc/2/object")

# Per-record wait limit (seconds).
TIMEOUT_SECONDS = 10 * 60  # 10 minutes

# 1. Search alpha.idea for records created today in status 'generated_prompt'.
today = datetime.now().strftime('%Y-%m-%d')
print(f"搜索日期: {today}")
# create_date is stored as a datetime in Odoo, so bound it to the whole day.
search_domain = [
    ['status', '=', 'generated_prompt'],
    ['create_date', '>=', f"{today} 00:00:00"],
    ['create_date', '<=', f"{today} 23:59:59"]
]
idea_ids = models.execute_kw(db, uid, password, 'alpha.idea', 'search', [search_domain])
if not idea_ids:
    print(f"没有找到当天状态为 'generated_prompt' 的 alpha.idea 记录")
    exit(0)
print(f"找到 {len(idea_ids)} 条待处理记录: {idea_ids}")

# 2. Process the records one by one (single-threaded on purpose, to keep
#    LLM load low).
for idx, idea_id in enumerate(idea_ids, 1):
    print(f"\n{'='*60}")
    print(f"处理第 {idx}/{len(idea_ids)} 条记录,ID: {idea_id}")
    print(f"{'='*60}")

    # Trigger the post_to_ms button.
    print("调用 post_to_ms 方法...")
    try:
        result = models.execute_kw(db, uid, password, 'alpha.idea', 'post_to_ms', [[idea_id]])
        print(f"post_to_ms 执行结果: {result}")
    except Fault as e:
        # XML-RPC cannot marshal a None return value; that fault is expected
        # and means the method actually ran.
        if "cannot marshal None" in str(e):
            print("post_to_ms 已执行(返回 None,这是正常的)")
        else:
            print(f"post_to_ms 执行出错: {e}")
            continue

    # Poll until the status becomes 'llm_received', with the 10-minute timeout.
    print("等待 LLM 返回结果...")
    start_time = time.time()
    current_status = None
    while True:
        elapsed_time = time.time() - start_time
        if elapsed_time > TIMEOUT_SECONDS:
            print(f"超时!已等待 {elapsed_time/60:.1f} 分钟,强制退出等待...")
            # Record the timeout on the record itself so it is visible in Odoo.
            try:
                models.execute_kw(db, uid, password, 'alpha.idea', 'write', [[idea_id], {'result_message': 'timeout'}])
                print(f"已在记录 {idea_id} 的 result_message 字段写入 'timeout'")
            except Exception as write_e:
                print(f"写入 result_message 失败: {write_e}")
            break
        time.sleep(5)  # poll every 5 seconds
        record_data = models.execute_kw(db, uid, password, 'alpha.idea', 'read', [[idea_id], ['status', 'name']])
        if not record_data:
            print("记录不存在")
            break
        current_status = record_data[0].get('status')
        record_name = record_data[0].get('name', 'Unknown')
        print(f"当前状态: {current_status} (已等待 {elapsed_time:.0f} 秒)")
        if current_status == 'llm_received':
            print(f"LLM 已返回结果,共等待 {elapsed_time:.0f} 秒({elapsed_time/60:.1f} 分钟),开始解码模板...")
            break
        elif current_status == 'failed':
            print(f"处理失败,跳过此记录")
            break

    # Decode the template once the LLM result has arrived.
    if current_status == 'llm_received':
        print("调用 decode_template 方法...")
        try:
            result = models.execute_kw(db, uid, password, 'alpha.idea', 'decode_template', [[idea_id]])
            print(f"decode_template 执行结果: {result}")
        except Fault as e:
            # Same expected marshalling fault as above.
            if "cannot marshal None" in str(e):
                print("decode_template 已执行(返回 None,这是正常的)")
            else:
                print(f"decode_template 执行出错: {e}")

    print(f"记录 {idea_id} 处理完成")
    print(f"{idx}/{len(idea_ids)} 条记录处理完毕,继续下一条...")

print(f"\n{'='*60}")
print("全部流程执行完成!收工!")
print(f"{'='*60}")

@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
# Minimal Odoo XML-RPC bootstrap: authenticate and print the resulting uid.
from xmlrpc.client import ServerProxy

# Toggle between the local dev server and the LAN deployment.
is_local = 0
if is_local:
    url = 'http://127.0.0.1:28888'
else:
    url = 'http://192.168.31.41:32000'
# NOTE(review): credentials are hard-coded — consider env vars or config.
username = 'rpc'
password = 'aaaAAA111'
db = "quantify"
common = ServerProxy(f"{url}/xmlrpc/2/common")
# authenticate returns the numeric user id (or False on bad credentials).
uid = common.authenticate(db, username, password, {})
print(uid)

@ -0,0 +1,404 @@
# 02_generate_alpha_idea.py 微服务迁移方案
## 一、功能总结
### 1.1 核心功能
`02_generate_alpha_idea.py` 是一个 Odoo 工作流自动化脚本,主要功能:
1. **搜索数据**:在 `alpha.idea` 模型中查找当天创建且状态为 `generated_prompt` 的记录
2. **遍历处理**:单线程依次处理每条记录
3. **调用 post_to_ms**:触发 `alpha.idea` 模型的 `post_to_ms()` 方法,将数据发送到微服务
4. **等待 LLM 返回**:死循环轮询状态,每5秒检查一次
5. **超时处理**:10分钟超时,超时后在 `result_message` 字段写入 "timeout"
6. **调用 decode_template**:状态变为 `llm_received` 后,调用 `decode_template()` 方法解码模板
### 1.2 关键特性
- **单线程处理**:不并发,避免 LLM 压力过大
- **10分钟硬编码超时**:正常2-3分钟,超时即放弃
- **状态机驱动**:依赖 Odoo 模型的状态字段流转
---
## 二、微服务架构设计
### 2.1 接口设计
```
POST /api/alpha-idea/process
```
**请求参数**:
```json
{
"idea_id": 123 // alpha.idea 记录的 ID
}
```
**响应**:
```json
{
"success": true,
"message": "处理完成",
"idea_id": 123,
"wait_time": 45, // 等待秒数
"status": "llm_received"
}
```
### 2.2 处理流程
```
Odoo 按钮点击
httpx 请求 FastAPI 微服务
POST /api/alpha-idea/process (携带 idea_id)
微服务调用 Odoo XML-RPC
1. 调用 post_to_ms()
2. 轮询等待状态 (每5秒)
3. 超时10分钟或成功
4. 调用 decode_template()
返回结果给 Odoo
```
---
## 三、代码实现
### 3.1 目录结构
```
rpc_alpha_workflow/
├── 01_generate_direction.py
├── 02_generate_alpha_idea.py # 原脚本(保留备份)
├── reference_01.py
├── reference_02.py
├── migration_plan.md # 本文档
└── alpha_idea_service/ # 微服务目录
├── main.py # FastAPI 入口
├── config.py # 配置
├── odoo_client.py # Odoo XML-RPC 客户端
└── requirements.txt # 依赖
```
### 3.2 核心代码
#### config.py
```python
# -*- coding: utf-8 -*-
"""配置文件"""
# Odoo 连接配置
ODOO_URL = "http://192.168.31.41:32000"
ODOO_DB = "quantify"
ODOO_USERNAME = "rpc"
ODOO_PASSWORD = "aaaAAA111"
# 超时配置(硬编码,不对外暴露)
TIMEOUT_SECONDS = 10 * 60 # 10分钟
POLL_INTERVAL = 5 # 轮询间隔秒数
```
#### odoo_client.py
```python
# -*- coding: utf-8 -*-
"""Odoo XML-RPC 客户端"""
from xmlrpc.client import ServerProxy, Fault
from . import config
class OdooClient:
def __init__(self):
self.url = config.ODOO_URL
self.db = config.ODOO_DB
self.username = config.ODOO_USERNAME
self.password = config.ODOO_PASSWORD
self.uid = None
self.models = None
self._authenticate()
def _authenticate(self):
"""认证并获取 uid"""
common = ServerProxy(f"{self.url}/xmlrpc/2/common")
self.uid = common.authenticate(self.db, self.username, self.password, {})
self.models = ServerProxy(f"{self.url}/xmlrpc/2/object")
def call(self, model, method, args=None, kwargs=None):
"""调用 Odoo 方法"""
args = args or []
kwargs = kwargs or {}
return self.models.execute_kw(self.db, self.uid, self.password, model, method, args, kwargs)
def post_to_ms(self, idea_id: int):
"""调用 post_to_ms 方法"""
try:
result = self.call('alpha.idea', 'post_to_ms', [[idea_id]])
return {"success": True, "result": result}
except Fault as e:
if "cannot marshal None" in str(e):
return {"success": True, "result": None}
return {"success": False, "error": str(e)}
def read_record(self, idea_id: int, fields: list):
"""读取记录"""
result = self.call('alpha.idea', 'read', [[idea_id], fields])
return result[0] if result else None
def write_record(self, idea_id: int, values: dict):
"""写入记录"""
return self.call('alpha.idea', 'write', [[idea_id], values])
def decode_template(self, idea_id: int):
"""调用 decode_template 方法"""
try:
result = self.call('alpha.idea', 'decode_template', [[idea_id]])
return {"success": True, "result": result}
except Fault as e:
if "cannot marshal None" in str(e):
return {"success": True, "result": None}
return {"success": False, "error": str(e)}
```
#### main.py
```python
# -*- coding: utf-8 -*-
"""FastAPI 微服务入口"""
import time
from fastapi import FastAPI
from pydantic import BaseModel
from .odoo_client import OdooClient
from . import config
app = FastAPI(title="Alpha Idea Service")
class ProcessRequest(BaseModel):
idea_id: int
class ProcessResponse(BaseModel):
success: bool
message: str
idea_id: int
wait_time: float = 0
status: str = ""
@app.post("/api/alpha-idea/process", response_model=ProcessResponse)
async def process_idea(request: ProcessRequest):
"""
处理单个 alpha.idea 记录
1. 调用 post_to_ms
2. 等待状态变为 llm_received(10分钟超时)
3. 调用 decode_template
"""
idea_id = request.idea_id
client = OdooClient()
# 1. 调用 post_to_ms
post_result = client.post_to_ms(idea_id)
if not post_result["success"]:
return ProcessResponse(
success=False,
message=f"post_to_ms 失败: {post_result.get('error')}",
idea_id=idea_id,
status="failed"
)
# 2. 等待状态变为 llm_received
start_time = time.time()
current_status = None
while True:
elapsed_time = time.time() - start_time
# 检查超时
if elapsed_time > config.TIMEOUT_SECONDS:
# 写入 timeout
try:
client.write_record(idea_id, {"result_message": "timeout"})
except Exception:
pass
return ProcessResponse(
success=False,
message=f"超时!已等待 {elapsed_time/60:.1f} 分钟",
idea_id=idea_id,
wait_time=elapsed_time,
status="timeout"
)
time.sleep(config.POLL_INTERVAL)
# 读取状态
record = client.read_record(idea_id, ["status", "name"])
if not record:
return ProcessResponse(
success=False,
message="记录不存在",
idea_id=idea_id,
wait_time=elapsed_time,
status="not_found"
)
current_status = record.get("status")
if current_status == "llm_received":
break
elif current_status == "failed":
return ProcessResponse(
success=False,
message="处理失败",
idea_id=idea_id,
wait_time=elapsed_time,
status="failed"
)
# 3. 调用 decode_template
decode_result = client.decode_template(idea_id)
return ProcessResponse(
success=decode_result["success"],
message="处理完成" if decode_result["success"] else f"decode_template 失败: {decode_result.get('error')}",
idea_id=idea_id,
wait_time=time.time() - start_time,
status=current_status
)
@app.get("/health")
async def health_check():
"""健康检查"""
return {"status": "ok"}
```
#### requirements.txt
```
fastapi
uvicorn
pydantic
```
---
## 四、Odoo 端改造
### 4.1 修改 post_to_ms 方法
在 `reference_02.py` 的 `post_to_ms` 方法中,移除直接调用 httpx 发送请求到微服务的逻辑,改为:
```python
def post_to_ms(self):
"""
修改为:只准备数据,不直接发送请求
真正的发送由外部微服务触发
"""
# ... 原有校验逻辑不变 ...
# 组装请求数据
payload = {
'record_id': self.id,
'prompt': full_prompt,
'model_name': model_name,
'base_url': base_url,
'api_key': api_key,
'callback_url': callback_url,
'system_prompt': self.system_prompt,
'user_prompt': self.user_prompt,
'final_prompt': self.final_prompt,
}
# 将 payload 保存到某个字段,或者返回给调用方
# 这里我们返回 payload,让调用方(微服务)来处理
return payload
```
**或者更简单的方式**:Odoo 按钮点击时,直接 httpx 请求微服务:
```python
def btn_trigger_alpha_idea_service(self):
"""按钮方法:触发微服务处理"""
import httpx
# 获取微服务地址(从配置读取)
ms_url = self.env['ir.config_parameter'].sudo().get_param('alpha.idea.service.url')
try:
response = httpx.post(
f"{ms_url}/api/alpha-idea/process",
json={"idea_id": self.id},
timeout=600 # 10分钟超时
)
result = response.json()
if result["success"]:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': '成功',
'message': f'处理完成,等待时间: {result["wait_time"]:.0f}秒',
'type': 'success',
'sticky': False,
}
}
else:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': '失败',
'message': result["message"],
'type': 'danger',
'sticky': False,
}
}
except Exception as e:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': '错误',
'message': str(e),
'type': 'danger',
'sticky': False,
}
}
```
---
## 五、部署步骤
### 5.1 微服务部署
1. 创建目录结构
2. 复制代码文件
3. 安装依赖:`pip install -r requirements.txt`
4. 启动服务:`uvicorn alpha_idea_service.main:app --host 0.0.0.0 --port 32005`
### 5.2 Odoo 配置
1. 添加系统参数:`alpha.idea.service.url` = `http://微服务地址:32005`
2. 在 `alpha.idea` 模型添加按钮方法 `btn_trigger_alpha_idea_service`
3. 在视图添加按钮
---
## 六、注意事项
1. **单线程保证**:微服务内部不开启多线程/异步并发处理单个请求
2. **超时处理**:HTTP 客户端(Odoo 端)和微服务端都要设置 10 分钟超时
3. **状态回写**:超时后必须回写 `result_message = 'timeout'`
4. **错误处理**:任何异常都不能阻塞流程,要返回给调用方
---
## 七、待确认事项
1. 微服务端口是否固定为 32005?
2. Odoo 端是否需要批量处理接口(一次性传入多个 idea_id)?
3. 是否需要添加日志记录(loguru 等)?
4. 是否需要限制同时处理的请求数(比如只处理一个,其他排队)?

@ -0,0 +1,630 @@
# -*- coding: utf-8 -*-
import random
import sys
import uuid
import httpx
from odoo import models, fields
from . import decode_template
class AlphaIdea(models.Model):
    """Alpha idea lifecycle: fetch dataset data, build LLM prompts, post to
    the microservice, then decode the returned templates into expressions."""
    _name = 'alpha.idea'
    _description = 'Alpha Idea'
    _order = 'id desc'

    name = fields.Char(string='Name', required=True, default=lambda self: str(uuid.uuid4()))
    # Workflow state machine, driven by the button methods below.
    status = fields.Selection([
        ('draft', 'Draft'),
        ('fetched_data', 'Fetched Data'),
        ('generated_prompt', 'Generated Prompt'),
        ('posted_to_ms', 'Posted To MS'),
        ('llm_received', 'LLM Received'),
        ('decode_template', 'Decode Template'),
        ('decoded', 'Decoded'),
        ('done', 'Done'),
        ('failed', 'Failed'),
        ('cancel', 'Cancel')
    ], string='Status', default='draft')
    pushed = fields.Boolean(string='Pushed', default=False, readonly=True)
    region = fields.Many2one('alpha.region.settings', string='Region', required=True)
    universe = fields.Many2one('alpha.universe.settings', string='Universe', required=True)
    data_type = fields.Selection([('MATRIX', 'MATRIX'), ('VECTOR', 'VECTOR')], string='Data Type', required=True, default='MATRIX')
    delay = fields.Selection([('1', '1'), ('0', '0')], required=True, string='Delay', default='1')
    # Comma-separated names of the needed data sets (display only).
    data_sets = fields.Char(string='Data Sets', compute='_compute_data_sets')
    meta_prompt = fields.Many2one('alpha.prompt.settings', string='Meta Prompt')
    replace_prompt = fields.Text(string='Replace Prompt')
    final_prompt = fields.Text(string='Final Prompt')
    system_prompt = fields.Text(string='System Prompt')
    user_prompt = fields.Text(string='User Prompt')
    llm_generated_idea = fields.Text(string='LLM Generated Idea')
    # Free-text result/error channel (e.g. 'timeout' written by the driver script).
    result_message = fields.Text(string='Result Message')
    llm_settings_line_id = fields.Many2one('llm.settings.line', string='Model', default=lambda self: self._default_llm_settings_line_id())

    def _default_llm_settings_line_id(self):
        """Pick a random configured LLM model line; False when none exist."""
        llm_lines = self.env['llm.settings.line'].search([])
        if llm_lines:
            return random.choice(llm_lines).id
        return False

    idea_template_ids = fields.One2many('alpha.idea.template', 'idea_id', string='Idea Templates')
    needed_data_set_ids = fields.One2many('alpha.needed.data.set', 'idea_id', string='Needed Data Sets')
    final_expression_ids = fields.One2many('alpha.final.expression', 'idea_id', string='Final Expressions')
    expression_count = fields.Integer(string='Expression Count', required=True, readonly=True, default=0, compute='_compute_expression_count')
def _compute_data_sets(self):
# 显示使用的数据集
for record in self:
record.data_sets = ''
if record.needed_data_set_ids:
data_sets_list = []
for data_set_name in record.needed_data_set_ids:
data_sets_list.append(data_set_name.name)
if len(data_sets_list) > 0:
record.data_sets = ', '.join(data_sets_list)
    def action_cancel(self):
        """Abort the workflow for this idea."""
        self.status = 'cancel'

    def action_reset(self):
        """Return to 'fetched_data' so the prompt can be regenerated."""
        self.status = 'fetched_data'
    def btn_check_and_fetch_data(self):
        """Ensure an alpha.datasets record exists (and contains data) for
        every needed data set, then advance the status to 'fetched_data'.

        Returns True on success, or a client notification action on failure.
        """
        if not self.needed_data_set_ids:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'No data',
                    'message': 'No data set is needed.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        # TODO: look up datasets by datasets_id; if the record does not exist,
        # create one in the datasets module — it must then be downloaded
        # manually (mandatory).
        success = False
        for dataset in self.needed_data_set_ids:
            dataset_id = self.env['alpha.datasets'].search(
                [('datasets_id', '=', dataset.name),
                 ('region', '=', self.region.id),
                 ('universe', '=', self.universe.id),
                 ('delay', '=', self.delay)
                 ], limit=1)
            if not dataset_id:
                dataset_id = self.env['alpha.datasets'].create({
                    'name': str(uuid.uuid4()),
                    'datasets_id': dataset.name,
                    'region': self.region.id,
                    'universe': self.universe.id,
                    'delay': self.delay,
                })
                # After creating the record, immediately try to pull its data.
                try:
                    dataset_id.btn_get_datasets()
                    success = True
                except Exception as e:
                    self.status = 'failed'
                    return {
                        'type': 'ir.actions.client',
                        'tag': 'display_notification',
                        'params': {
                            'title': 'Failed',
                            'message': f'Dataset fetch failed: {str(e)}',
                            'type': 'danger',
                            'sticky': False,
                        }
                    }
            else:
                # Existing record: it must already contain downloaded lines.
                if dataset_id.line_ids:
                    success = True
                else:
                    return {
                        'type': 'ir.actions.client',
                        'tag': 'display_notification',
                        'params': {
                            'title': 'No data',
                            'message': 'Dataset does not exist.',
                            'type': 'danger',
                            'sticky': False,
                        }
                    }
        if success:
            self.status = 'fetched_data'
        return True
    def btn_generate_final_prompt(self):
        """Build the system/user/final prompts from the selected meta prompt.

        Acts as a toggle: if final_prompt is already set, clear all prompts
        and revert the status to 'fetched_data'. Otherwise collect dataset
        fields and operators, substitute the '###...###' placeholders in the
        meta prompt, and advance the status to 'generated_prompt'.
        """
        if self.final_prompt:
            # Second click clears the previously generated prompts.
            self.final_prompt = ''
            self.system_prompt = ''
            self.user_prompt = ''
            self.status = 'fetched_data'
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': '成功',
                    'message': '已清除最终提示词',
                    'type': 'success',
                    'sticky': False,
                }
            }
        if not self.meta_prompt:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'No data!',
                    'message': 'Please select the prompt template first.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        if not self.replace_prompt:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'No data!',
                    'message': 'Research direction cannot be empty.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        system_prompt = '''You are executing two skills in sequence:
1) brain-data-feature-engineering
2) brain-feature-implementation
The following SKILL.md documents are authoritative; follow them exactly.
'''
        # Gather every data field of the needed datasets; the first line seen
        # also supplies the dataset id and category for the prompt context.
        data_sets_list = []
        dataset_id = ''
        category = ''
        region = self.region.name
        delay = self.delay
        universe = self.universe.name
        for dataset in self.needed_data_set_ids:
            datasets_id = self.env['alpha.datasets'].search(
                [('datasets_id', '=', dataset.name)], limit=1)
            for datasets_line in datasets_id.line_ids:
                data_sets_list.append({
                    'id': datasets_line.data_field_name,
                    'description': datasets_line.description
                })
                if not dataset_id:
                    dataset_id = datasets_line.dataset_id
                if not category:
                    category = datasets_line.category_name
        if not data_sets_list:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'No data!',
                    'message': 'No related dataset found.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        field_count = len(data_sets_list)
        # All available operators are embedded into the prompt.
        operator_ids = self.env['alpha.operator.line'].search([])
        operator_list = []
        for operator in operator_ids:
            operator_list.append({
                'name': operator.name,
                'category': operator.category,
                'scope': operator.scope,
                'description': operator.description,
                'definition': operator.definition
            })
        if not operator_list:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'No data!',
                    'message': 'No related Operator found.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        fields_json = ','.join([str(field) for field in data_sets_list])
        user_prompt = '''{
"instructions": {
"output_format": "Fill OUTPUT_TEMPLATE.md with concrete content.",
"implementation_examples": "Each Implementation Example must be a template with {variable} placeholders. Use only placeholders from allowed_placeholders. Use suffix-only names; do not include dataset code/prefix/horizon.",
"no_code_fences": true,
"do_not_invent_placeholders": true
},
"dataset_context": {
"dataset_id": "''' + dataset_id + '''",
"dataset_name": null,
"dataset_description": null,
"category": "''' + category + '''",
"region": "''' + region + '''",
"delay": ''' + str(delay) + ''',
"universe": "''' + universe + '''",
"field_count": ''' + str(field_count) + '''
},
"fields": [
''' + fields_json + '''
]
}
'''
        final_prompt = self.meta_prompt.prompt
        # Substitute the '###...###' placeholders one by one.
        final_prompt = final_prompt.replace(
            '###question_driven###', self.replace_prompt)
        final_prompt = final_prompt.replace('###dataset_id###', dataset_id)
        final_prompt = final_prompt.replace('###category###', category)
        final_prompt = final_prompt.replace('###region###', region)
        final_prompt = final_prompt.replace('###delay###', str(delay))
        final_prompt = final_prompt.replace(
            '###field_count###', str(field_count))
        final_prompt = final_prompt.replace('###universe###', universe)
        final_prompt = final_prompt.replace(
            '###datasets###', str(data_sets_list))
        final_prompt = final_prompt.replace(
            '###operators###', str(operator_list))
        if self.data_type and self.data_type.upper() == "VECTOR":
            # Extra instruction telling the LLM how vector-type data must be used.
            vector_prompt = "since all the following the data is vector type data, before you do any process, you should choose a vector operator to generate its statistical feature to use, the data cannot be directly use. for example, if datafieldA and datafieldB are vector type data, you can use vec_avg(datafieldA) - vec_avg(datafieldB), where vec_avg() operator is used to generate the average of the data on a certain date. similarly, vector type operator can only be used on the vector type operator directly and cannot be nested, for example vec_avg(vec_sum(datafield)) is a false use."
            final_prompt = final_prompt.replace(
                '###vector_instruction###', vector_prompt)
        else:
            final_prompt = final_prompt.replace('###vector_instruction###', '')
        self.system_prompt = system_prompt
        self.user_prompt = user_prompt
        self.final_prompt = final_prompt
        self.status = 'generated_prompt'
        return {
            'type': 'ir.actions.client',
            'tag': 'display_notification',
            'params': {
                'title': '成功',
                'message': f'已生成提示模板,包含 {field_count} 个数据字段',
                'type': 'success',
                'sticky': False,
            }
        }
    def post_to_ms(self):
        """Send the assembled prompt to the LLM microservice (fire-and-forget).

        Validates prompts and model selection, resolves the microservice URL
        via get_ms_config(), posts the payload, then sets status to
        'posted_to_ms'. The LLM result comes back later via callback_url.
        """
        if not all([self.final_prompt, self.user_prompt, self.system_prompt]):
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'Error',
                    'message': 'Please generate a prompt template.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        if not self.llm_settings_line_id:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'Error',
                    'message': 'Please select a model.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        model_name = self.llm_settings_line_id.model_name
        base_url = self.llm_settings_line_id.llm_setting_id.base_url
        api_key = self.llm_settings_line_id.llm_setting_id.api_key
        # Current record id, echoed back by the microservice callback.
        record_id = self.id
        # Odoo callback endpoint for the microservice to post results to.
        base_url_odoo = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        callback_url = f"{base_url_odoo}/api/alpha-idea/result"
        # Assemble the full prompt from the three parts.
        full_prompt = f"{self.system_prompt}\n\n{self.user_prompt}\n\n{self.final_prompt}"
        # Resolve the microservice base URL.
        # NOTE(review): on a Nacos failure get_ms_config() returns a
        # notification-action dict, whose .get('url') is '' — that case falls
        # into the "not configured" branch below and masks the real error.
        ms_config = self.get_ms_config()
        ms_url = ms_config.get('url', '')
        if not ms_url:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'Error',
                    'message': 'Microservice address not configured.',
                    'type': 'danger',
                    'sticky': False
                }
            }
        # Assemble the request payload.
        payload = {
            'record_id': record_id,
            'prompt': full_prompt,
            'model_name': model_name,
            'base_url': base_url,
            'api_key': api_key,
            'callback_url': callback_url,
            'system_prompt': self.system_prompt,
            'user_prompt': self.user_prompt,
            'final_prompt': self.final_prompt,
        }
        # Send the request to the microservice.
        try:
            # timeout=0.001 makes this deliberately fire-and-forget: the
            # TimeoutException is expected and swallowed just below.
            httpx.post(f"{ms_url}:32004/api_alpha_generate_idea",
                       json=payload, timeout=0.001)
        except httpx.TimeoutException:
            pass
        except Exception as e:
            self.status = 'failed'
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'Error',
                    'message': f'Failed to send microservice:\n{e}.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        self.status = 'posted_to_ms'
    def get_ms_config(self):
        """Fetch the microservice configuration JSON from Nacos.

        Chooses the dev config on macOS (darwin) and the production config on
        Linux. Returns the parsed config dict on success; on request failure
        it returns a client notification-action dict instead.
        NOTE(review): callers such as post_to_ms() call .get('url') on the
        return value, so the error path is silently treated as "no url".
        """
        # TODO: fetch the microservice url from Nacos first.
        nacos_url = ''
        platform_info = sys.platform
        if platform_info == "darwin":
            nacos_url = 'http://192.168.31.41:30848/nacos/v1/cs/configs?dataId=microservices_dev&group=quantify'
        elif platform_info.startswith("linux"):
            nacos_url = 'http://192.168.31.41:30848/nacos/v1/cs/configs?dataId=microservices&group=quantify'
        # On any other platform nacos_url stays '' and httpx.get('') raises,
        # which is caught below and surfaced as a notification.
        try:
            ms_config_resp = httpx.get(nacos_url)
            ms_config_resp.raise_for_status()
        except Exception as e:
            return {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'Error',
                    'message': f'Nacos request failed:\n{e}.',
                    'type': 'danger',
                    'sticky': False,
                }
            }
        ms_config = ms_config_resp.json()
        return ms_config
def action_set_llm_received(self):
"""手动设置 LLM 已返回状态
当手动录入 llm_generated_idea 调用此函数将状态改为 llm_received
"""
if self.llm_generated_idea and self.status == 'generated_prompt':
self.status = 'llm_received'
return True
else:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': 'Error',
'message': 'llm_generated_idea 无数据或状态不是 Generated Prompt',
'type': 'danger',
'sticky': False,
}
}
def decode_template(self):
if not self.llm_generated_idea or self.status != 'llm_received':
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': 'Please use llm to generate the template first.',
'message': 'llm_generated_idea no data or status != llm_received',
'type': 'danger',
'sticky': False,
}
}
if self.status == 'done':
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': 'message',
'message': 'This idea has been generated, please do not repeat the operation.',
'type': 'danger',
'sticky': False,
}
}
llm_template = self.llm_generated_idea
data_sets_list = []
for dataset in self.needed_data_set_ids:
datasets_id = self.env['alpha.datasets'].search(
[('datasets_id', '=', dataset.name)], limit=1)
for datasets_line in datasets_id.line_ids:
data_sets_list.append({
'id': datasets_line.data_field_name
})
result_data = decode_template.process(data_sets_list, llm_template)
if result_data['success']:
templates = result_data['templates']
expressions = result_data['expressions']
# 如果没有解出数据,不做任何操作
if len(templates) == 0 and len(expressions) == 0:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': '提示',
'message': '未解码出任何数据',
'type': 'warning',
'sticky': False,
}
}
# 组装模板数据
template_data = []
for template_item in templates:
template_data.append({
'template': template_item['template'],
'original_template': template_item['original_template'],
'idea': template_item.get('idea', ''),
'template_line_ids': [(0, 0, {'expression': line}) for line in template_item['expressions']]
})
# 保存数据
self.write({
'idea_template_ids': [(0, 0, data) for data in template_data],
'final_expression_ids': [(0, 0, {'name': expression}) for expression in expressions],
'status': 'decoded',
})
return True
else:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': '失败',
'message': '模板解码失败, 生成数量为 0',
'type': 'danger',
'sticky': False,
}
}
def combination_settings(self):
self.ensure_one()
if self.idea_template_ids and self.final_expression_ids:
return {
'type': 'ir.actions.act_window',
'name': '组合设置',
'res_model': 'wizard.combination.settings',
'view_mode': 'form',
'target': 'new',
'context': {
'active_model': 'alpha.idea',
'active_id': self.id,
},
}
else:
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': 'Execute the decoding template first',
'message': 'idea_template_ids and final_expression_ids no data!',
'type': 'danger',
'sticky': False,
}
}
def _compute_expression_count(self):
for record in self:
if record.final_expression_ids:
record.expression_count = len(record.final_expression_ids)
else:
record.expression_count = 0
class NeededDataSet(models.Model):
    """One needed data-set name attached to an alpha.idea record."""
    _name = 'alpha.needed.data.set'
    _description = 'Alpha Needed Data Set'

    idea_id = fields.Many2one('alpha.idea', string='Idea', ondelete='cascade')
    # Data-set identifier, matched against alpha.datasets.datasets_id elsewhere.
    name = fields.Char(string='Name')
class IdeaTemplate(models.Model):
    """A decoded template plus its expanded expression lines."""
    _name = 'alpha.idea.template'
    _description = 'Alpha Idea Template'

    idea_id = fields.Many2one('alpha.idea', string='Idea', ondelete='cascade')
    template_line_ids = fields.One2many(
        'alpha.idea.template.line', 'idea_id', string='Template Lines')
    template = fields.Char(string='Template')
    original_template = fields.Char(string='Original Template')
    expression_count = fields.Integer(
        string='Expression Count', compute='_compute_expression_count', default=0)
    idea = fields.Text(string='Text')

    def _compute_expression_count(self):
        """Number of expression lines under this template."""
        for record in self:
            record.expression_count = len(record.template_line_ids)
class IdeaTemplateLine(models.Model):
    """A single concrete expression generated from a template."""
    _name = 'alpha.idea.template.line'
    _description = 'Alpha Idea Template Line'

    # NOTE(review): despite the name, idea_id points at the template record,
    # not alpha.idea — it is the inverse of IdeaTemplate.template_line_ids.
    idea_id = fields.Many2one('alpha.idea.template',
                              string='Alpha Idea Template', ondelete='cascade')
    expression = fields.Char(string='Expression')
# NOTE(review): class name is not PascalCase (PEP 8); kept as-is because the
# Odoo registry uses _name, but external code may reference the class directly.
class Final_Expression(models.Model):
    """A final alpha expression produced by decoding an idea."""
    _name = 'alpha.final.expression'
    _description = 'Alpha Final Expression'

    idea_id = fields.Many2one('alpha.idea', string='Idea', ondelete='cascade')
    name = fields.Char(string='Name')
Loading…
Cancel
Save