main
parent
2f420e3348
commit
61633b21b4
@ -0,0 +1,239 @@ |
||||
# -*- coding: utf-8 -*- |
||||
""" |
||||
批量隐藏低质量Alpha |
||||
目标URL: https://api.worldquantbrain.com/users/self/alphas?limit=22&offset=0&status=UNSUBMITTED%1FIS_FAIL&is.sharpe%3C0.7&is.sharpe%3E-0.8&is.turnover%3C0.2&order=-is.sharpe&hidden=false |
||||
隐藏接口: PATCH https://api.worldquantbrain.com/alphas/{alpha_id} |
||||
Payload: {"hidden":true} |
||||
""" |
||||
import random |
||||
import time |
||||
|
||||
import httpx |
||||
from httpx import BasicAuth |
||||
|
||||
# Global configuration
TIMEOUT = 10.0  # per-request timeout in seconds
MAX_RETRIES = 3  # maximum attempts per request
RETRY_DELAY_MIN = 3  # minimum wait between retries (seconds)
RETRY_DELAY_MAX = 5  # maximum wait between retries (seconds)
MAX_LIMIT = 100  # maximum items per page
MAX_PAGE = 5  # maximum number of pages to fetch
||||
|
||||
|
||||
def login():
    """Authenticate against the WorldQuant Brain API.

    Fetches the account credentials from the Nacos config server, then
    opens an httpx client with basic-auth and posts to the
    authentication endpoint.

    Returns:
        httpx.Client | None: an authenticated client on success, or
        None on any failure (credential fetch failed or authentication
        rejected).  The original code returned False on one failure
        path and None on the other; both are now None for consistency.
    """
    # Fetch username/password from the Nacos config server.
    # A timeout is applied so a hung config server cannot block startup.
    nacos_resp = httpx.get(
        'http://192.168.31.41:30848/nacos/v1/cs/configs?dataId=wq_account&group=quantify',
        timeout=TIMEOUT,
    )
    if nacos_resp.status_code != 200:
        print('获取账号密码失败')
        return None

    config = nacos_resp.json()

    username = config['user_name']
    password = config['password']

    print(f"正在登录账户: {username}")

    # Create a client with basic-auth credentials and a request timeout.
    client = httpx.Client(auth=BasicAuth(username, password), timeout=TIMEOUT)

    # The Brain API answers 201 Created on successful authentication.
    response = client.post('https://api.worldquantbrain.com/authentication')
    print(f"登录状态: {response.status_code}")

    if response.status_code == 201:
        print("登录成功!")
        print(response.json())
        return client

    print(f"登录失败: {response.json()}")
    client.close()  # release the connection pool of the failed session
    return None
||||
|
||||
|
||||
def request_with_retry(client, method, url, **kwargs):
    """Issue an HTTP request, retrying on any exception.

    Retries up to MAX_RETRIES times, sleeping a random
    RETRY_DELAY_MIN..RETRY_DELAY_MAX seconds between attempts.

    Args:
        client: an httpx.Client (anything exposing .request works).
        method: HTTP method name, e.g. 'get' or 'patch'.
        url: target URL.
        **kwargs: forwarded verbatim to client.request.

    Returns:
        The response of the first successful attempt.

    Raises:
        Exception: whatever the last attempt raised once retries are
        exhausted.  (The old trailing ``return None`` after the raise
        was unreachable and has been removed.)
    """
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            print(f" 请求尝试 {attempt}/{MAX_RETRIES}: {method.upper()} {url}")
            return client.request(method, url, **kwargs)
        except Exception as e:
            print(f" 请求异常: {str(e)}")
            if attempt < MAX_RETRIES:
                sleep_time = random.uniform(RETRY_DELAY_MIN, RETRY_DELAY_MAX)
                print(f" 等待 {sleep_time:.2f} 秒后重试...")
                time.sleep(sleep_time)
            else:
                print(f" 已达到最大重试次数 {MAX_RETRIES},放弃请求")
                raise
||||
|
||||
|
||||
def fetch_all_alphas(client, base_url):
    """Fetch all qualifying alphas, page by page.

    Pages through ``base_url`` in steps of MAX_LIMIT, up to MAX_PAGE
    pages, stopping early on an empty page or a failed request.

    Args:
        client: authenticated httpx.Client.
        base_url: listing URL; an ``&offset=N`` query is appended per page.

    Returns:
        list[dict]: one ``{'id': ..., 'is': ...}`` entry per alpha found.
    """
    alphas = []

    for page in range(MAX_PAGE):
        # Offset of the current page.
        offset = page * MAX_LIMIT

        # Build the URL for this page.
        url = f"{base_url}&offset={offset}"

        print(f"\n正在获取第 {page + 1}/{MAX_PAGE} 页,offset={offset} 的数据...")

        try:
            response = request_with_retry(client, 'get', url)
        except Exception as e:
            print(f"获取数据失败: {str(e)}")
            break

        if response.status_code != 200:
            print(f"获取数据失败: {response.status_code}")
            print(f"响应: {response.text}")
            break

        data = response.json()
        results = data.get('results', [])

        # An empty page means everything has been fetched.
        if not results:
            print("\n没有更多数据,获取完成")
            break

        # Collect the id and in-sample stats of each alpha on this page.
        for alpha in results:
            alpha_info = {
                'id': alpha.get('id'),
                'is': alpha.get('is', {})
            }
            if alpha_info['id']:
                alphas.append(alpha_info)
                print(f" 发现Alpha: {alpha_info['id']}")

        print(f"本页获取完成,共 {len(results)} 个Alpha")

        # Throttle between pages using the shared delay constants
        # (was hard-coded 3/5); no need to wait after the last page.
        if page < MAX_PAGE - 1:
            sleep_time = random.uniform(RETRY_DELAY_MIN, RETRY_DELAY_MAX)
            print(f"等待 {sleep_time:.2f} 秒后继续获取下一页数据...")
            time.sleep(sleep_time)

    return alphas
||||
|
||||
|
||||
def hide_alpha(client, alpha_info):
    """Hide one alpha via PATCH /alphas/{alpha_id} with {"hidden": true}.

    On success the alpha's in-sample metrics are printed.

    Args:
        client: authenticated httpx.Client.
        alpha_info: dict carrying the alpha 'id' and its 'is' stats.

    Returns:
        bool: True when the alpha was hidden, False otherwise.
    """
    alpha_id = alpha_info['id']
    is_data = alpha_info.get('is', {})

    url = f"https://api.worldquantbrain.com/alphas/{alpha_id}"
    payload = {"hidden": True}

    try:
        response = request_with_retry(client, 'patch', url, json=payload)
    except Exception as e:
        print(f" ✗ Alpha {alpha_id} 隐藏异常: {str(e)}")
        return False

    # Anything other than 200/204 counts as a failure.
    if response.status_code not in [200, 204]:
        print(f" ✗ Alpha {alpha_id} 隐藏失败: {response.status_code}")
        print(f" 响应: {response.text}")
        return False

    # Report the in-sample metrics of the alpha we just hid.
    sharpe = is_data.get('sharpe', 'N/A')
    fitness = is_data.get('fitness', 'N/A')
    margin = is_data.get('margin', 'N/A')
    drawdown = is_data.get('drawdown', 'N/A')
    returns = is_data.get('returns', 'N/A')

    print(f" ✓ Alpha {alpha_id} 隐藏成功")
    print(f" sharpe={sharpe}, fitness={fitness}, margin={margin}, drawdown={drawdown}, returns={returns}")
    return True
||||
|
||||
|
||||
def batch_hide_alphas(client, alphas):
    """Hide every alpha in the list, printing progress and a summary.

    Args:
        client: authenticated httpx.Client.
        alphas: list of ``{'id': ..., 'is': ...}`` dicts.

    Returns:
        tuple[int, int]: (count hidden successfully, count failed).
    """
    total = len(alphas)
    ok_total = 0
    failed_total = 0

    print(f"\n开始批量隐藏 {total} 个Alpha...")
    print("=" * 50)

    for position, entry in enumerate(alphas, 1):
        print(f"[{position}/{total}] 正在隐藏 Alpha: {entry['id']}")

        if hide_alpha(client, entry):
            ok_total += 1
        else:
            failed_total += 1

        # Brief pause so we do not hammer the API.
        time.sleep(0.5)

    print("=" * 50)
    print("批量隐藏完成!")
    print(f"成功: {ok_total} 个")
    print(f"失败: {failed_total} 个")

    return ok_total, failed_total
||||
|
||||
|
||||
def run(client):
    """One pass: fetch low-quality alphas and hide them.

    Args:
        client: authenticated httpx.Client, or None when login failed.
    """
    # Listing URL: unhidden, low-sharpe, low-turnover UNSUBMITTED/IS_FAIL alphas.
    TARGET_URL = "https://api.worldquantbrain.com/users/self/alphas?limit=100&offset=0&status=UNSUBMITTED%1FIS_FAIL&is.sharpe%3C0.65&is.sharpe%3E-0.8&is.turnover%3C0.2&order=-is.sharpe&hidden=false"

    if not client:
        print("登录失败,程序退出")
        return

    try:
        print("\n" + "=" * 50)
        print("获取所有符合条件的Alpha")
        print("=" * 50)

        alphas = fetch_all_alphas(client, TARGET_URL)

        if not alphas:
            # Nothing to do this round.  Return instead of exit(1) so the
            # caller's periodic loop keeps running (exit() killed the
            # whole process here).
            print("没有找到需要隐藏的Alpha")
            return

        print(f"\n总共找到 {len(alphas)} 个需要隐藏的Alpha")

        print("\n" + "=" * 50)
        print("批量隐藏Alpha")
        print("=" * 50)

        # Counts are already printed inside; the returned tuple was unused.
        batch_hide_alphas(client, alphas)

    except Exception as e:
        print(str(e))
||||
|
||||
|
||||
def main():
    """Log in once, then run one hide pass every 60 seconds."""
    client = login()
    if not client:
        # Without a session every run() would print the same failure
        # message forever; the old code still entered the infinite loop.
        return

    while True:
        run(client)
        time.sleep(60)
||||
|
||||
|
||||
|
||||
# Script entry point: run the hide loop until the process is killed.
if __name__ == '__main__':
    main()
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,65 @@ |
||||
# CLAUDE.md |
||||
|
||||
Behavioral guidelines to reduce common LLM coding mistakes. Merge with project-specific instructions as needed. |
||||
|
||||
**Tradeoff:** These guidelines bias toward caution over speed. For trivial tasks, use judgment. |
||||
|
||||
## 1. Think Before Coding |
||||
|
||||
**Don't assume. Don't hide confusion. Surface tradeoffs.** |
||||
|
||||
Before implementing: |
||||
- State your assumptions explicitly. If uncertain, ask. |
||||
- If multiple interpretations exist, present them - don't pick silently. |
||||
- If a simpler approach exists, say so. Push back when warranted. |
||||
- If something is unclear, stop. Name what's confusing. Ask. |
||||
|
||||
## 2. Simplicity First |
||||
|
||||
**Minimum code that solves the problem. Nothing speculative.** |
||||
|
||||
- No features beyond what was asked. |
||||
- No abstractions for single-use code. |
||||
- No "flexibility" or "configurability" that wasn't requested. |
||||
- No error handling for impossible scenarios. |
||||
- If you write 200 lines and it could be 50, rewrite it. |
||||
|
||||
Ask yourself: "Would a senior engineer say this is overcomplicated?" If yes, simplify. |
||||
|
||||
## 3. Surgical Changes |
||||
|
||||
**Touch only what you must. Clean up only your own mess.** |
||||
|
||||
When editing existing code: |
||||
- Don't "improve" adjacent code, comments, or formatting. |
||||
- Don't refactor things that aren't broken. |
||||
- Match existing style, even if you'd do it differently. |
||||
- If you notice unrelated dead code, mention it - don't delete it. |
||||
|
||||
When your changes create orphans: |
||||
- Remove imports/variables/functions that YOUR changes made unused. |
||||
- Don't remove pre-existing dead code unless asked. |
||||
|
||||
The test: Every changed line should trace directly to the user's request. |
||||
|
||||
## 4. Goal-Driven Execution |
||||
|
||||
**Define success criteria. Loop until verified.** |
||||
|
||||
Transform tasks into verifiable goals: |
||||
- "Add validation" → "Write tests for invalid inputs, then make them pass" |
||||
- "Fix the bug" → "Write a test that reproduces it, then make it pass" |
||||
- "Refactor X" → "Ensure tests pass before and after" |
||||
|
||||
For multi-step tasks, state a brief plan: |
||||
``` |
||||
1. [Step] → verify: [check] |
||||
2. [Step] → verify: [check] |
||||
3. [Step] → verify: [check] |
||||
``` |
||||
|
||||
Strong success criteria let you loop independently. Weak criteria ("make it work") require constant clarification. |
||||
|
||||
--- |
||||
|
||||
**These guidelines are working if:** fewer unnecessary changes in diffs, fewer rewrites due to overcomplication, and clarifying questions come before implementation rather than after mistakes. |
||||
@ -0,0 +1,383 @@ |
||||
# Alpha Transformer |
||||
|
||||
基于AI的Alpha变种自动生成器,将一个种子Alpha转换为多个具有不同逻辑和参数的新Alpha表达式。 |
||||
|
||||
## 功能概述 |
||||
|
||||
**Alpha Transformer**的核心思想是:给定一个种子Alpha作为参考,结合大量已知的Alpha模板和AI能力,自动生成多个具有不同策略逻辑的新Alpha表达式。这些变种可以用于: |
||||
- 策略多元化(避免单一策略风险) |
||||
- 参数优化(探索最优参数组合) |
||||
- 逻辑变体(发现新的市场规律) |
||||
|
||||
## 工作原理详解 |
||||
|
||||
### 整体流程 |
||||
|
||||
``` |
||||
种子Alpha → 获取详情 → AI生成模板 → 填充参数 → 验证输出 |
||||
↓ ↓ ↓ ↓ ↓ |
||||
原始ID settings 逻辑变体 表达式组合 JSON结果 |
||||
expression + expression |
||||
datafields |
||||
``` |
||||
|
||||
### 第一阶段:获取种子Alpha信息 |
||||
|
||||
当用户输入Alpha ID后,系统会: |
||||
|
||||
1. **连接BRAIN平台** - 使用提供的用户名密码认证 |
||||
2. **获取Alpha详情** - 通过API获取以下信息: |
||||
- `settings`: Alpha的配置参数(region, universe, delay等) |
||||
- `expression`: Alpha的具体表达式代码 |
||||
- `description`: Alpha的策略描述 |
||||
- `operators`: 使用的算子列表(ts_mean, group_rank等) |
||||
- `data_fields`: 使用的数据字段(close, volume等) |
||||
|
||||
3. **解析表达式结构** - 分析Alpha代码中的: |
||||
- 占位符类型(数据字段、时间窗口、分组方式等) |
||||
- 算子组合模式 |
||||
- 数据依赖关系 |
||||
|
||||
### 第二阶段:AI生成模板 |
||||
|
||||
这是**整个流程的核心**。系统会: |
||||
|
||||
1. **构建提示词** - 将以下内容发送给LLM: |
||||
``` |
||||
种子Alpha详情(settings + expression + 使用的算子) |
||||
+ 模板摘要(template_summary.md中的90+个模板) |
||||
+ 生成指令(要求生成X个变种模板) |
||||
``` |
||||
|
||||
2. **LLM推理** - AI根据以下信息生成新模板: |
||||
- 种子Alpha的核心逻辑是什么? |
||||
- template_summary中有哪些类似或可组合的模板? |
||||
- 如何创造性地变形生成新模板? |
||||
|
||||
3. **模板输出** - LLM返回的每个模板包含: |
||||
- `template_expression`: 使用占位符的模板表达式 |
||||
- 例如:`group_rank(<data_field/> / ts_mean(<data_field/>, <window/>))` |
||||
- `template_explanation`: 模板的核心思想和逻辑说明 |
||||
|
||||
### 第三阶段:模板填充 |
||||
|
||||
对每个生成的模板,系统会: |
||||
|
||||
1. **识别占位符** - 解析模板中的所有`<placeholder/>`: |
||||
```python |
||||
# 常见的占位符类型 |
||||
<data_field/> # 数据字段(如 close, volume, pe_ratio) |
||||
<window/> # 时间窗口(如 20, 60, 120) |
||||
<group/> # 分组方式(如 industry, sector) |
||||
<operator/> # 操作符(如 ts_mean, rank) |
||||
``` |
||||
|
||||
2. **获取候选数据** - 从BRAIN API获取: |
||||
- 符合条件的数据字段列表(top_n个) |
||||
- 可用的窗口参数 |
||||
- 支持的分组方式 |
||||
|
||||
3. **组合生成** - 笛卡尔积组合所有候选: |
||||
```python |
||||
# 如果模板有2个<data_field/>和3个<window/> |
||||
# 则生成 50 * 50 * 3 = 7500 个表达式(示例) |
||||
|
||||
# 实际会限制总数,避免组合爆炸 |
||||
``` |
||||
|
||||
4. **设置继承** - 新生成的表达式会继承种子Alpha的: |
||||
- Region(地区) |
||||
- Universe(股票池) |
||||
- Delay(延迟) |
||||
- Neutralization(中性化方式) |
||||
|
||||
### 第四阶段:验证与输出 |
||||
|
||||
1. **表达式验证** - 验证生成的表达式是否合法: |
||||
- 语法检查 |
||||
- 数据依赖检查 |
||||
- 算子兼容性检查 |
||||
|
||||
2. **结果分类** - 分成三类输出: |
||||
- **Alpha_candidates.json**: 模板级别的结果 |
||||
- 包含模板表达式和每个占位符的候选列表 |
||||
- 可用于进一步手动编辑或参数调整 |
||||
|
||||
- **Alpha_generated_expressions_success.json**: 成功生成的表达式 |
||||
- 具体的、可直接使用的Alpha表达式 |
||||
- 可导入BRAIN或回测器 |
||||
|
||||
- **Alpha_generated_expressions_error.json**: 失败的表达式 |
||||
- 生成或验证过程中出错的表达式 |
||||
- 用于排查模板问题 |
||||
|
||||
## template_summary.md 的作用 |
||||
|
||||
### 文件内容 |
||||
|
||||
`template_summary.md`是一个包含**90+个精选Alpha模板**的文档,每个模板包含: |
||||
|
||||
1. **Hypothesis(假设)** - 策略的核心思想 |
||||
``` |
||||
"After news is released, if a stock takes a longer time to rise, |
||||
it may show strong evidence of upward momentum" |
||||
``` |
||||
|
||||
2. **Expression(表达式)** - 具体的Alpha代码 |
||||
``` |
||||
`ts_backfill(vec_avg(nws12_prez_4l),504)` |
||||
``` |
||||
|
||||
3. **Settings(设置)** - 推荐使用的配置 |
||||
``` |
||||
Region: USA, Universe: TOP500, Delay: 1 |
||||
``` |
||||
|
||||
4. **逻辑链深度解析** - 为什么这个Alpha有效 |
||||
``` |
||||
- 时序相对性: ts_backfill处理新闻数据的稀疏性 |
||||
- 算子深意: vec_avg聚合多维情绪 |
||||
``` |
||||
|
||||
5. **优化方向** - 如何进一步改进 |
||||
``` |
||||
- 去噪: 增加winsorize或rank |
||||
- 从属信号: 叠加Social Media Effect |
||||
``` |
||||
|
||||
### 模板分类 |
||||
|
||||
文件中的模板来自多个来源: |
||||
|
||||
1. **Learn系列** - BRAIN官方教程中的示例 |
||||
- Learn101: 基础Alpha示例 |
||||
- Learn102: 中级Alpha示例 |
||||
- Learn103: 高级Alpha示例 |
||||
|
||||
2. **《151 Trading Strategies》** - 学术论文中的策略 |
||||
- 动量策略 |
||||
- 价值策略 |
||||
- 波动率策略 |
||||
|
||||
3. **社区精选** - 论坛中高评分的Alpha |
||||
|
||||
### 为什么修改它能生成不同结果? |
||||
|
||||
**是的,这是生成不同Alpha变种的关键!** |
||||
|
||||
当你修改`template_summary.md`时: |
||||
|
||||
| 修改内容 | 影响 | |
||||
|---------|------| |
||||
| **增加新模板** | LLM有更多参考,生成更多样化的变种 | |
||||
| **删除旧模板** | 生成的变种会集中在剩余模板上 | |
||||
| **修改模板说明** | LLM对模板的理解改变,生成的变种逻辑不同 | |
||||
| **调整模板格式** | 可能影响LLM的解析和理解 | |
||||
|
||||
### 如何优化template_summary |
||||
|
||||
**建议策略:** |
||||
|
||||
1. **按主题分类** - 如果你想生成某类策略的变种 |
||||
```markdown |
||||
## 动量策略 |
||||
[相关的5-10个模板] |
||||
|
||||
## 价值策略 |
||||
[相关的5-10个模板] |
||||
``` |
||||
|
||||
2. **加入自己的Alpha** - 如果你有成功的Alpha |
||||
```markdown |
||||
## 我的成功策略 |
||||
**Expression**: `group_rank(close / ts_mean(close, 20))` |
||||
**核心思想**: 均线偏离策略 |
||||
``` |
||||
|
||||
3. **保持格式一致** - 确保每个模板都包含: |
||||
- 清晰的假设 |
||||
- 具体表达式 |
||||
- 逻辑解析 |
||||
|
||||
4. **定期更新** - 随着策略进化,不断添加新的有效模板 |
||||
|
||||
## 快速开始 |
||||
|
||||
### 1. 安装依赖 |
||||
|
||||
```bash |
||||
cd /Users/jack/source/mySpace/mycode/my_project/py/alpha/WqApp/simple72 |
||||
pip install -r requirements.txt |
||||
``` |
||||
|
||||
### 2. 启动服务 |
||||
|
||||
```bash |
||||
python main.py |
||||
``` |
||||
|
||||
服务将在 http://localhost:8000 启动 |
||||
|
||||
### 3. 使用Web界面 |
||||
|
||||
1. 打开浏览器访问 http://localhost:8000 |
||||
2. 填写表单: |
||||
- **Alpha ID**: 输入种子Alpha的ID(格式如 `ak2YPVxv`) |
||||
- **LLM API Key**: 你的LLM服务API密钥 |
||||
- **LLM Base URL**: LLM服务地址 |
||||
- Kimi: `https://api.moonshot.cn/v1` |
||||
- OpenAI: `https://api.openai.com/v1` |
||||
- 其他: `https://your-llm-service.com/v1` |
||||
- **LLM Model**: 模型名称(如 `kimi-k2.5`, `gpt-4`) |
||||
- **BRAIN Username/Password**: 你的BRAIN平台账号 |
||||
3. 点击"生成变种" |
||||
4. 等待处理完成(通常3-10分钟) |
||||
5. 查看或复制JSON结果 |
||||
|
||||
### 4. 使用API |
||||
|
||||
```bash |
||||
curl -X POST http://localhost:8000/api/generate \ |
||||
-H "Content-Type: application/json" \ |
||||
-d '{ |
||||
"alpha_id": "ak2YPVxv", |
||||
"llm_api_key": "your-api-key", |
||||
"llm_base_url": "https://api.moonshot.cn/v1", |
||||
"llm_model": "kimi-k2.5", |
||||
"brain_username": "your-brain-user", |
||||
"brain_password": "your-brain-pass", |
||||
"top_n_datafield": 50 |
||||
}' |
||||
``` |
||||
|
||||
## API端点 |
||||
|
||||
### GET / |
||||
- **功能**: 主页面 |
||||
- **返回**: HTML页面 |
||||
|
||||
### POST /api/generate |
||||
- **功能**: 生成Alpha变种 |
||||
- **请求体**: |
||||
```json |
||||
{ |
||||
"alpha_id": "string (必填)", |
||||
"llm_api_key": "string (必填)", |
||||
"llm_base_url": "string (必填)", |
||||
"llm_model": "string (必填)", |
||||
"brain_username": "string (必填)", |
||||
"brain_password": "string (必填)", |
||||
"top_n_datafield": "int (可选,默认50)", |
||||
"user_region": "string (可选)", |
||||
"user_universe": "string (可选)", |
||||
"user_delay": "int (可选)", |
||||
"user_category": "string (可选)", |
||||
"user_data_type": "string (可选,默认MATRIX)" |
||||
} |
||||
``` |
||||
- **响应**: |
||||
```json |
||||
{ |
||||
"success": true, |
||||
"alpha_id": "种子Alpha ID", |
||||
"candidates": [...], |
||||
"expressions_success": [...], |
||||
"expressions_error": [...] |
||||
} |
||||
``` |
||||
|
||||
### GET /api/health |
||||
- **功能**: 健康检查 |
||||
- **响应**: `{"status": "healthy", "service": "alpha-transformer"}` |
||||
|
||||
## 项目结构 |
||||
|
||||
``` |
||||
simple72/ |
||||
├── main.py # FastAPI应用入口 |
||||
├── requirements.txt # 依赖清单 |
||||
├── Tranformer/ # Transformer核心模块 |
||||
│ ├── Transformer.py # 主逻辑(~5000行) |
||||
│ │ ├── generate_alpha_description() # 获取Alpha详情 |
||||
│ │ ├── generate_new_alphas() # 生成新Alpha |
||||
│ │ ├── propose_alpha_templates() # LLM生成模板 |
||||
│ │ ├── populate_template() # 填充模板 |
||||
│ │ └── validate_expression() # 验证表达式 |
||||
│ ├── ace_lib.py # BRAIN API客户端 |
||||
│ ├── helpful_functions.py # 辅助函数 |
||||
│ ├── validator.py # 表达式验证器 |
||||
│ ├── template_summary.md # 模板摘要(可自定义) |
||||
│ └── output/ # 输出目录 |
||||
│ ├── Alpha_candidates.json |
||||
│ ├── Alpha_generated_expressions_success.json |
||||
│ └── Alpha_generated_expressions_error.json |
||||
└── templates/ |
||||
└── index.html # 前端页面 |
||||
``` |
||||
|
||||
## 高级配置 |
||||
|
||||
### 调整生成数量 |
||||
|
||||
修改`top_n_datafield`参数: |
||||
- 值越大 → 生成的表达式越多,但处理时间越长 |
||||
- 值越小 → 生成更快,但可能错过好的变种 |
||||
- 建议值:30-100之间 |
||||
|
||||
### 自定义模板摘要 |
||||
|
||||
编辑`Tranformer/template_summary.md`: |
||||
- 添加你认为有效的Alpha模板 |
||||
- 按策略类型分类整理 |
||||
- 保持每个模板的格式一致性 |
||||
|
||||
### 限制生成范围 |
||||
|
||||
通过可选参数限制生成范围: |
||||
- `user_region`: 只在特定地区生成 |
||||
- `user_universe`: 只在特定股票池生成 |
||||
- `user_delay`: 只使用特定的延迟设置 |
||||
|
||||
## 常见问题 |
||||
|
||||
### Q: 生成失败怎么办? |
||||
A: 检查以下几点: |
||||
1. BRAIN账号密码是否正确 |
||||
2. LLM API Key是否有效 |
||||
3. Alpha ID是否存在且可访问 |
||||
4. 查看返回的error字段 |
||||
|
||||
### Q: 生成的结果都是类似的? |
||||
A: 尝试: |
||||
1. 修改`template_summary.md`,添加更多样化的模板 |
||||
2. 调整`top_n_datafield`,增加数据字段候选 |
||||
3. 使用不同的种子Alpha |
||||
|
||||
### Q: 生成时间太长? |
||||
A: 这是正常的,因为: |
||||
- LLM调用需要时间 |
||||
- BRAIN API查询数据字段 |
||||
- 表达式组合和验证 |
||||
|
||||
可以: |
||||
- 减少`top_n_datafield` |
||||
- 简化`template_summary.md` |
||||
- 使用本地部署的LLM |
||||
|
||||
## 技术栈 |
||||
|
||||
- **后端**: FastAPI + asyncio |
||||
- **LLM调用**: OpenAI SDK (AsyncOpenAI) |
||||
- **BRAIN连接**: requests Session |
||||
- **前端**: 原生HTML/CSS/JavaScript |
||||
- **验证**: 自定义表达式验证器 |
||||
|
||||
## 许可证 |
||||
|
||||
MIT License |
||||
|
||||
## 参考资源 |
||||
|
||||
- [WorldQuant BRAIN 文档](https://www.worldquantbrain.com/) |
||||
- [BRAIN表达式语法](https://www.worldquantbrain.com/data/expressions) |
||||
- [BRAIN算子列表](https://www.worldquantbrain.com/data/operators) |
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1 @@ |
||||
# Transformer module |
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,180 @@ |
||||
import json |
||||
import os |
||||
from typing import Union |
||||
|
||||
import pandas as pd |
||||
from pandas.io.formats.style import Styler |
||||
|
||||
# Base URLs for the BRAIN REST API and the web platform; overridable via the
# BRAIN_API_URL / BRAIN_URL environment variables.
brain_api_url = os.environ.get("BRAIN_API_URL", "https://api.worldquantbrain.com")
brain_url = os.environ.get("BRAIN_URL", "https://platform.worldquantbrain.com")
||||
|
||||
|
||||
def make_clickable_alpha_id(alpha_id: str) -> str:
    """
    Render an alpha ID as an HTML anchor pointing at its platform page.

    Args:
        alpha_id (str): The ID of the alpha.

    Returns:
        str: An HTML string containing a clickable link to the alpha's
        page on the platform.
    """
    link = "{base}/alpha/{aid}".format(base=brain_url, aid=alpha_id)
    return '<a href="' + link + '">' + alpha_id + '</a>'
||||
|
||||
|
||||
def prettify_result(
    result: list, detailed_tests_view: bool = False, clickable_alpha_id: bool = False
) -> Union[pd.DataFrame, Styler]:
    """
    Combine and format simulation results into a single DataFrame for analysis.

    Args:
        result (list): A list of dictionaries containing simulation results.
        detailed_tests_view (bool, optional): If True, include detailed test results. Defaults to False.
        clickable_alpha_id (bool, optional): If True, make alpha IDs clickable. Defaults to False.

    Returns:
        pandas.DataFrame or pandas.io.formats.style.Styler: A DataFrame containing formatted results,
        optionally with clickable alpha IDs.
    """
    # Stack the in-sample stats of every alpha that has them, best fitness first.
    list_of_is_stats = [result[x]["is_stats"] for x in range(len(result)) if result[x]["is_stats"] is not None]
    is_stats_df = pd.concat(list_of_is_stats).reset_index(drop=True)
    is_stats_df = is_stats_df.sort_values("fitness", ascending=False)

    # Map alpha_id -> expression.  SUPER alphas are described by their
    # selection/combo pair; regular alphas by the single expression.
    expressions = {
        result[x]["alpha_id"]: (
            {
                "selection": result[x]["simulate_data"]["selection"],
                "combo": result[x]["simulate_data"]["combo"],
            }
            if result[x]["simulate_data"]["type"] == "SUPER"
            else result[x]["simulate_data"]["regular"]
        )
        for x in range(len(result))
        if result[x]["is_stats"] is not None
    }
    expression_df = pd.DataFrame(list(expressions.items()), columns=["alpha_id", "expression"])

    # Collect in-sample test outcomes, drop WARNING rows, then pivot to one
    # column per test name.
    list_of_is_tests = [result[x]["is_tests"] for x in range(len(result)) if result[x]["is_tests"] is not None]
    is_tests_df = pd.concat(list_of_is_tests, sort=True).reset_index(drop=True)
    is_tests_df = is_tests_df[is_tests_df["result"] != "WARNING"]
    if detailed_tests_view:
        # Keep limit/result/value per test as a dict in each pivoted cell.
        cols = ["limit", "result", "value"]
        is_tests_df["details"] = is_tests_df[cols].to_dict(orient="records")
        is_tests_df = is_tests_df.pivot(index="alpha_id", columns="name", values="details").reset_index()
    else:
        is_tests_df = is_tests_df.pivot(index="alpha_id", columns="name", values="result").reset_index()

    # Join stats, expressions and tests; drop columns that still hold any
    # PENDING test, and normalize camelCase column names to snake_case.
    alpha_stats = pd.merge(is_stats_df, expression_df, on="alpha_id")
    alpha_stats = pd.merge(alpha_stats, is_tests_df, on="alpha_id")
    alpha_stats = alpha_stats.drop(columns=alpha_stats.columns[(alpha_stats == "PENDING").any()])
    alpha_stats.columns = alpha_stats.columns.str.replace("(?<=[a-z])(?=[A-Z])", "_", regex=True).str.lower()
    if clickable_alpha_id:
        return alpha_stats.style.format({"alpha_id": lambda x: make_clickable_alpha_id(str(x))})
    return alpha_stats
||||
|
||||
|
||||
def concat_pnl(result: list) -> pd.DataFrame:
    """
    Combine PnL results from multiple alphas into a single DataFrame.

    Args:
        result (list): Simulation-result dictionaries holding PnL data.

    Returns:
        pandas.DataFrame: PnL rows of every alpha that has them, with the
        original index preserved as a column.
    """
    frames = [entry["pnl"] for entry in result if entry["pnl"] is not None]
    return pd.concat(frames).reset_index()
||||
|
||||
|
||||
def concat_is_tests(result: list) -> pd.DataFrame:
    """
    Combine in-sample test results from multiple alphas into a single DataFrame.

    Args:
        result (list): Simulation-result dictionaries holding in-sample test data.

    Returns:
        pandas.DataFrame: the in-sample test rows of every alpha that has
        them, re-indexed from zero with columns sorted alphabetically.
    """
    frames = [entry["is_tests"] for entry in result if entry["is_tests"] is not None]
    return pd.concat(frames, sort=True).reset_index(drop=True)
||||
|
||||
|
||||
def save_simulation_result(result: dict) -> None:
    """
    Save the simulation result to a JSON file in the 'simulation_results' folder.

    The file is named '<alpha_id>_<region>'.

    Args:
        result (dict): Simulation result for one alpha; must carry 'id'
        and 'settings' -> 'region'.
    """
    folder_path = "simulation_results/"
    os.makedirs(folder_path, exist_ok=True)

    target = os.path.join(folder_path, f"{result['id']}_{result['settings']['region']}")
    with open(target, "w", encoding="utf-8") as file:
        json.dump(result, file)
||||
|
||||
|
||||
def save_pnl(pnl_df: pd.DataFrame, alpha_id: str, region: str) -> None:
    """
    Save the PnL data for an alpha to a CSV file in the 'alphas_pnl' folder.

    Args:
        pnl_df (pandas.DataFrame): The DataFrame containing PnL data.
        alpha_id (str): The ID of the alpha.
        region (str): The region for which the PnL data was generated.
    """
    folder = "alphas_pnl/"
    os.makedirs(folder, exist_ok=True)
    pnl_df.to_csv(os.path.join(folder, f"{alpha_id}_{region}.csv"))
||||
|
||||
|
||||
def save_yearly_stats(yearly_stats: pd.DataFrame, alpha_id: str, region: str):
    """
    Save the yearly statistics for an alpha to a CSV file in the 'yearly_stats' folder.

    Args:
        yearly_stats (pandas.DataFrame): The DataFrame containing yearly statistics.
        alpha_id (str): The ID of the alpha.
        region (str): The region for which the statistics were generated.
    """
    folder = "yearly_stats/"
    os.makedirs(folder, exist_ok=True)
    yearly_stats.to_csv(os.path.join(folder, f"{alpha_id}_{region}.csv"), index=False)
||||
|
||||
|
||||
def expand_dict_columns(data: pd.DataFrame) -> pd.DataFrame:
    """
    Expand dictionary columns in a DataFrame into separate columns.

    Each column whose first value is a dict is fanned out into one column
    per key, named '<col>_<key>'.  The original columns are kept.

    Args:
        data (pandas.DataFrame): The input DataFrame with dictionary columns.

    Returns:
        pandas.DataFrame: A new DataFrame with expanded columns; returned
        unchanged when the input is empty or has no dictionary columns.
    """
    # Guard the two crash cases of the original: .iloc[0] raises on an
    # empty frame, and pd.concat([]) raises when no column holds dicts.
    if data.empty:
        return data
    dict_columns = [col for col in data.columns if isinstance(data[col].iloc[0], dict)]
    if not dict_columns:
        return data

    new_columns = pd.concat(
        [data[col].apply(pd.Series).rename(columns=lambda x: f"{col}_{x}") for col in dict_columns],
        axis=1,
    )

    return pd.concat([data, new_columns], axis=1)
||||
@ -0,0 +1,112 @@ |
||||
{ |
||||
"ts_zscore(divide(avg_pct_change_estimate_12m_earnings_7d, add(count_analysts_lower_curr_qtr_earnings_30d, 0.0001)), 126)": { |
||||
"template_explanation": "This template applies a 126-day rolling z-score normalization to the original earnings confidence ratio. By standardizing the signal relative to its own historical distribution, it captures whether current earnings optimism (vs. near-term pessimism) is unusually strong or weak compared to historical norms, enabling mean-reversion or momentum trading around historical equilibrium points.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "IND", |
||||
"universe": "TOP500", |
||||
"delay": 1, |
||||
"decay": 6, |
||||
"neutralization": "SLOW_AND_FAST", |
||||
"truncation": 0.02, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"maxPosition": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2014-01-01", |
||||
"endDate": "2023-12-31" |
||||
}, |
||||
"placeholder_candidates": {} |
||||
}, |
||||
"group_zscore(ts_mean(avg_pct_change_estimate_12m_earnings_7d, 66), industry)": { |
||||
"template_explanation": "This template calculates the 66-day mean of the 12-month earnings estimate change and then performs industry-relative z-score normalization. It extracts pure earnings momentum by removing sector-wide trends, identifying stocks within each industry that have stronger or weaker earnings revisions than their peer group average.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "IND", |
||||
"universe": "TOP500", |
||||
"delay": 1, |
||||
"decay": 6, |
||||
"neutralization": "SLOW_AND_FAST", |
||||
"truncation": 0.02, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"maxPosition": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2014-01-01", |
||||
"endDate": "2023-12-31" |
||||
}, |
||||
"placeholder_candidates": {} |
||||
}, |
||||
"ts_decay_linear(avg_pct_change_estimate_12m_earnings_7d, 20)": { |
||||
"template_explanation": "This template applies linear decay weighting to the 12-month earnings estimate changes over a 20-day window. Recent earnings revisions receive higher weight than older ones, creating a smoothed momentum signal that responds quickly to new information while filtering out short-term noise\u2014a refined version focusing purely on the numerator's forward-looking signal.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "IND", |
||||
"universe": "TOP500", |
||||
"delay": 1, |
||||
"decay": 6, |
||||
"neutralization": "SLOW_AND_FAST", |
||||
"truncation": 0.02, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"maxPosition": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2014-01-01", |
||||
"endDate": "2023-12-31" |
||||
}, |
||||
"placeholder_candidates": {} |
||||
}, |
||||
"regression_neut(ts_mean(avg_pct_change_estimate_12m_earnings_7d, 66), log(cap))": { |
||||
"template_explanation": "This template removes the market cap factor exposure from the earnings momentum signal using regression neutralization. By stripping out size bias (larger companies may have more analyst coverage and different revision patterns), this alpha isolates the pure earnings-specific component, reducing unintended factor tilts.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "IND", |
||||
"universe": "TOP500", |
||||
"delay": 1, |
||||
"decay": 6, |
||||
"neutralization": "SLOW_AND_FAST", |
||||
"truncation": 0.02, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"maxPosition": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2014-01-01", |
||||
"endDate": "2023-12-31" |
||||
}, |
||||
"placeholder_candidates": {} |
||||
}, |
||||
"divide(ts_rank(avg_pct_change_estimate_12m_earnings_7d, 252), add(ts_rank(count_analysts_lower_curr_qtr_earnings_30d, 126), 0.1))": { |
||||
"template_explanation": "This template converts both earnings estimate change and analyst cut counts into percentile ranks before taking their ratio. The 252-day rank for earnings captures long-term earnings momentum, while the 126-day rank for analyst cuts captures recent bearishness. Ranking before division creates a more robust, distribution-invariant signal that is comparable across different market regimes.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "IND", |
||||
"universe": "TOP500", |
||||
"delay": 1, |
||||
"decay": 6, |
||||
"neutralization": "SLOW_AND_FAST", |
||||
"truncation": 0.02, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"maxPosition": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2014-01-01", |
||||
"endDate": "2023-12-31" |
||||
}, |
||||
"placeholder_candidates": {} |
||||
} |
||||
} |
||||
@ -0,0 +1,654 @@ |
||||
{ |
||||
"group_rank(ts_mean(ts_delta(ts_backfill(<fundamental_field/>, <backfill_days/>), <delta_days/>), <mean_days/>) / ts_std_dev(ts_delta(ts_backfill(<fundamental_field/>, <backfill_days/>), <delta_days/>), <std_days/>), <group_field/>)": { |
||||
"template_explanation": "Generalized risk-adjusted fundamental momentum: replaces EPS with any quarterly fundamental, keeps the Sharpe-like score and intra-group ranking to isolate smooth, persistent trends while neutralizing structural differences across groups.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "GLB", |
||||
"universe": "TOPDIV3000", |
||||
"delay": 1, |
||||
"decay": 0, |
||||
"neutralization": "STATISTICAL", |
||||
"truncation": 0.01, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2013-01-20", |
||||
"endDate": "2023-01-20" |
||||
}, |
||||
"placeholder_candidates": { |
||||
"<fundamental_field/>": { |
||||
"type": "data_field", |
||||
"candidates": [ |
||||
{ |
||||
"id": "fnd23_icsm_m_vers", |
||||
"description": "[Quarterly] Revenue" |
||||
}, |
||||
{ |
||||
"id": "fnd72_s_pit_or_is_q_net_income", |
||||
"description": "Net Income" |
||||
}, |
||||
{ |
||||
"id": "mdl219_1_ocfmargin", |
||||
"description": "Operating cash flow margin calculated as operating cash flow divided by sales." |
||||
}, |
||||
{ |
||||
"id": "fnd23_tangible_bvps", |
||||
"description": "tangible book value per share." |
||||
}, |
||||
{ |
||||
"id": "ebitda_per_share_trailing_twelve_months", |
||||
"description": "EBITDA per share for the trailing twelve months." |
||||
} |
||||
] |
||||
}, |
||||
"<backfill_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 252 |
||||
}, |
||||
{ |
||||
"value": 504 |
||||
}, |
||||
{ |
||||
"value": 756 |
||||
}, |
||||
{ |
||||
"value": 1008 |
||||
}, |
||||
{ |
||||
"value": 1260 |
||||
} |
||||
] |
||||
}, |
||||
"<delta_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 42 |
||||
}, |
||||
{ |
||||
"value": 63 |
||||
}, |
||||
{ |
||||
"value": 84 |
||||
}, |
||||
{ |
||||
"value": 126 |
||||
}, |
||||
{ |
||||
"value": 252 |
||||
} |
||||
] |
||||
}, |
||||
"<mean_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 20 |
||||
}, |
||||
{ |
||||
"value": 40 |
||||
}, |
||||
{ |
||||
"value": 60 |
||||
}, |
||||
{ |
||||
"value": 80 |
||||
}, |
||||
{ |
||||
"value": 120 |
||||
} |
||||
] |
||||
}, |
||||
"<std_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 20 |
||||
}, |
||||
{ |
||||
"value": 40 |
||||
}, |
||||
{ |
||||
"value": 60 |
||||
}, |
||||
{ |
||||
"value": 120 |
||||
}, |
||||
{ |
||||
"value": 252 |
||||
} |
||||
] |
||||
}, |
||||
"<group_field/>": { |
||||
"type": "group_data_field", |
||||
"candidates": [ |
||||
{ |
||||
"name": "industry" |
||||
}, |
||||
{ |
||||
"name": "sector" |
||||
}, |
||||
{ |
||||
"name": "subindustry" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
}, |
||||
"group_rank(ts_mean(ts_delta(<price_field/>, <short_delta/>), <medium_mean/>) / ts_std_dev(ts_delta(<price_field/>, <short_delta/>), <medium_std/>), <group_field/>)": { |
||||
"template_explanation": "Price-momentum version of the seed: uses returns instead of EPS change, still rewards consistent momentum within each sector/industry, producing a cleaner trend signal that is not distorted by industry-wide volatility regimes.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "GLB", |
||||
"universe": "TOPDIV3000", |
||||
"delay": 1, |
||||
"decay": 0, |
||||
"neutralization": "STATISTICAL", |
||||
"truncation": 0.01, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2013-01-20", |
||||
"endDate": "2023-01-20" |
||||
}, |
||||
"placeholder_candidates": { |
||||
"<price_field/>": { |
||||
"type": "data_field", |
||||
"candidates": [ |
||||
{ |
||||
"id": "close", |
||||
"description": "Daily close price" |
||||
}, |
||||
{ |
||||
"id": "open", |
||||
"description": "Daily open price" |
||||
}, |
||||
{ |
||||
"id": "vwap", |
||||
"description": "Daily volume weighted average price" |
||||
}, |
||||
{ |
||||
"id": "oth463_adjust_close", |
||||
"description": "Adjusted close price" |
||||
}, |
||||
{ |
||||
"id": "high", |
||||
"description": "Daily high price" |
||||
} |
||||
] |
||||
}, |
||||
"<short_delta/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
}, |
||||
{ |
||||
"value": 42 |
||||
} |
||||
] |
||||
}, |
||||
"<medium_mean/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
}, |
||||
{ |
||||
"value": 42 |
||||
}, |
||||
{ |
||||
"value": 63 |
||||
}, |
||||
{ |
||||
"value": 126 |
||||
} |
||||
] |
||||
}, |
||||
"<medium_std/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 20 |
||||
}, |
||||
{ |
||||
"value": 40 |
||||
}, |
||||
{ |
||||
"value": 60 |
||||
}, |
||||
{ |
||||
"value": 80 |
||||
}, |
||||
{ |
||||
"value": 120 |
||||
} |
||||
] |
||||
}, |
||||
"<group_field/>": { |
||||
"type": "group_data_field", |
||||
"candidates": [ |
||||
{ |
||||
"name": "industry" |
||||
}, |
||||
{ |
||||
"name": "subindustry" |
||||
}, |
||||
{ |
||||
"name": "sector" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
}, |
||||
"group_rank((<fundamental_field/> - ts_delay(<fundamental_field/>, <lag_quarters/>)) / ts_std_dev(<fundamental_field/> - ts_delay(<fundamental_field/>, <lag_quarters/>), <std_quarters/>), <group_field/>)": { |
||||
"template_explanation": "SUE-style surprise on any quarterly metric: standardizes the YoY (or QoQ) jump by its own historical volatility, then ranks within peer groups to highlight genuine outliers while canceling systematic industry drift.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "GLB", |
||||
"universe": "TOPDIV3000", |
||||
"delay": 1, |
||||
"decay": 0, |
||||
"neutralization": "STATISTICAL", |
||||
"truncation": 0.01, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2013-01-20", |
||||
"endDate": "2023-01-20" |
||||
}, |
||||
"placeholder_candidates": { |
||||
"<fundamental_field/>": { |
||||
"type": "data_field", |
||||
"candidates": [ |
||||
{ |
||||
"id": "fnd72_s_pit_or_is_q_net_income", |
||||
"description": "Net Income" |
||||
}, |
||||
{ |
||||
"id": "star_rev_surprise_prediction_fy2", |
||||
"description": "Revenue predicted surprise pct - forward 2 years" |
||||
}, |
||||
{ |
||||
"id": "star_eps_smart_estimate_12m", |
||||
"description": "SmartEstimate F12M EPS" |
||||
}, |
||||
{ |
||||
"id": "ebitda_per_share_trailing_twelve_months", |
||||
"description": "EBITDA per share for the trailing twelve months." |
||||
}, |
||||
{ |
||||
"id": "mdl219_1_ocfmargin", |
||||
"description": "Operating cash flow margin calculated as operating cash flow divided by sales." |
||||
} |
||||
] |
||||
}, |
||||
"<lag_quarters/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 1 |
||||
}, |
||||
{ |
||||
"value": 4 |
||||
}, |
||||
{ |
||||
"value": 8 |
||||
} |
||||
] |
||||
}, |
||||
"<std_quarters/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 8 |
||||
}, |
||||
{ |
||||
"value": 12 |
||||
}, |
||||
{ |
||||
"value": 16 |
||||
}, |
||||
{ |
||||
"value": 20 |
||||
} |
||||
] |
||||
}, |
||||
"<group_field/>": { |
||||
"type": "group_data_field", |
||||
"candidates": [ |
||||
{ |
||||
"name": "industry" |
||||
}, |
||||
{ |
||||
"name": "sector" |
||||
}, |
||||
{ |
||||
"name": "subindustry" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
}, |
||||
"group_rank(ts_mean(ts_delta(ts_backfill(<alternative_field/>, <backfill_days/>), <delta_days/>), <mean_days/>) / ts_std_dev(ts_delta(ts_backfill(<alternative_field/>, <backfill_days/>), <delta_days/>), <std_days/>), <group_field/>)": { |
||||
"template_explanation": "Alternative-data momentum clone: applies the same risk-adjusted change logic to non-traditional data (web-traffic, sentiment, etc.), yielding a smooth, comparable signal that is industry-neutral and robust to data gaps.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "GLB", |
||||
"universe": "TOPDIV3000", |
||||
"delay": 1, |
||||
"decay": 0, |
||||
"neutralization": "STATISTICAL", |
||||
"truncation": 0.01, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2013-01-20", |
||||
"endDate": "2023-01-20" |
||||
}, |
||||
"placeholder_candidates": { |
||||
"<alternative_field/>": { |
||||
"type": "data_field", |
||||
"candidates": [ |
||||
{ |
||||
"id": "fnd23_intfvalld1_qkim", |
||||
"description": "Traffic acquisition costs (TAC) represent the cost for a company to acquire traffic to their site." |
||||
}, |
||||
{ |
||||
"id": "snt22pos_min_377", |
||||
"description": "minimum positive sentiment score." |
||||
}, |
||||
{ |
||||
"id": "mdl106_tre", |
||||
"description": "earnings revenue trend" |
||||
}, |
||||
{ |
||||
"id": "fnd23_intfvalld1_liim", |
||||
"description": "Click Rate, Total - %. It shows the frequency an advertisement downloaded with a webpage is clicked on. This is obtained using the number of clicks on an ad on a webpage, divided by the total number of times that the ad was downloaded with a page." |
||||
}, |
||||
{ |
||||
"id": "headline_sentiment_value", |
||||
"description": "Aggregate sentiment value computed for the news headline." |
||||
} |
||||
] |
||||
}, |
||||
"<backfill_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 14 |
||||
}, |
||||
{ |
||||
"value": 30 |
||||
}, |
||||
{ |
||||
"value": 60 |
||||
} |
||||
] |
||||
}, |
||||
"<delta_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
}, |
||||
{ |
||||
"value": 42 |
||||
}, |
||||
{ |
||||
"value": 63 |
||||
} |
||||
] |
||||
}, |
||||
"<mean_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
}, |
||||
{ |
||||
"value": 42 |
||||
}, |
||||
{ |
||||
"value": 63 |
||||
} |
||||
] |
||||
}, |
||||
"<std_days/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 20 |
||||
}, |
||||
{ |
||||
"value": 40 |
||||
}, |
||||
{ |
||||
"value": 60 |
||||
} |
||||
] |
||||
}, |
||||
"<group_field/>": { |
||||
"type": "group_data_field", |
||||
"candidates": [ |
||||
{ |
||||
"name": "industry" |
||||
}, |
||||
{ |
||||
"name": "sector" |
||||
}, |
||||
{ |
||||
"name": "subindustry" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
}, |
||||
"group_rank(ts_corr(ts_delta(ts_backfill(<fundamental_field/>, <bf/>), <d1/>), ts_delta(<price_field/>, <d2/>), <corr_win/>), <group_field/>)": { |
||||
"template_explanation": "Earnings-to-price alignment factor: measures how tightly fundamental momentum co-moves with price momentum; high correlation within group flags firms where the market promptly rewards improving fundamentals, suggesting continued follow-through.", |
||||
"seed_alpha_settings": { |
||||
"instrumentType": "EQUITY", |
||||
"region": "GLB", |
||||
"universe": "TOPDIV3000", |
||||
"delay": 1, |
||||
"decay": 0, |
||||
"neutralization": "STATISTICAL", |
||||
"truncation": 0.01, |
||||
"pasteurization": "ON", |
||||
"unitHandling": "VERIFY", |
||||
"nanHandling": "ON", |
||||
"maxTrade": "OFF", |
||||
"language": "FASTEXPR", |
||||
"visualization": false, |
||||
"startDate": "2013-01-20", |
||||
"endDate": "2023-01-20" |
||||
}, |
||||
"placeholder_candidates": { |
||||
"<fundamental_field/>": { |
||||
"type": "data_field", |
||||
"candidates": [ |
||||
{ |
||||
"id": "ern3_all_delay_1_next_reptime", |
||||
"description": "next report time" |
||||
}, |
||||
{ |
||||
"id": "star_rev_surprise_prediction_fy2", |
||||
"description": "Revenue predicted surprise pct - forward 2 years" |
||||
}, |
||||
{ |
||||
"id": "fnd72_s_pit_or_is_q_net_income", |
||||
"description": "Net Income" |
||||
}, |
||||
{ |
||||
"id": "ebitda_per_share_trailing_twelve_months", |
||||
"description": "EBITDA per share for the trailing twelve months." |
||||
}, |
||||
{ |
||||
"id": "fnd7_ointfund_qfcnif", |
||||
"description": "Quarterly Fundamental Item: Financing Activities -> Net Cash Flow (Statement of Cash Flows)" |
||||
} |
||||
] |
||||
}, |
||||
"<bf/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 1 |
||||
}, |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
} |
||||
] |
||||
}, |
||||
"<d1/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
}, |
||||
{ |
||||
"value": 63 |
||||
}, |
||||
{ |
||||
"value": 126 |
||||
} |
||||
] |
||||
}, |
||||
"<price_field/>": { |
||||
"type": "data_field", |
||||
"candidates": [ |
||||
{ |
||||
"id": "close", |
||||
"description": "Daily close price" |
||||
}, |
||||
{ |
||||
"id": "pv37_volume_13", |
||||
"description": "Trading volume" |
||||
}, |
||||
{ |
||||
"id": "srp_average_score", |
||||
"description": "Average score" |
||||
}, |
||||
{ |
||||
"id": "pv37_open_13", |
||||
"description": "Open price" |
||||
}, |
||||
{ |
||||
"id": "open", |
||||
"description": "Daily open price" |
||||
} |
||||
] |
||||
}, |
||||
"<d2/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 5 |
||||
}, |
||||
{ |
||||
"value": 10 |
||||
}, |
||||
{ |
||||
"value": 21 |
||||
}, |
||||
{ |
||||
"value": 42 |
||||
}, |
||||
{ |
||||
"value": 63 |
||||
} |
||||
] |
||||
}, |
||||
"<corr_win/>": { |
||||
"type": "integer_parameter", |
||||
"candidates": [ |
||||
{ |
||||
"value": 20 |
||||
}, |
||||
{ |
||||
"value": 40 |
||||
}, |
||||
{ |
||||
"value": 60 |
||||
}, |
||||
{ |
||||
"value": 120 |
||||
}, |
||||
{ |
||||
"value": 252 |
||||
} |
||||
] |
||||
}, |
||||
"<group_field/>": { |
||||
"type": "group_data_field", |
||||
"candidates": [ |
||||
{ |
||||
"name": "industry" |
||||
}, |
||||
{ |
||||
"name": "subindustry" |
||||
}, |
||||
{ |
||||
"name": "sector" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
} |
||||
} |
||||
@ -0,0 +1 @@ |
||||
[] |
||||
@ -0,0 +1,7 @@ |
||||
[ |
||||
"divide(ts_rank(avg_pct_change_estimate_12m_earnings_7d, 252), add(ts_rank(count_analysts_lower_curr_qtr_earnings_30d, 126), 0.1))", |
||||
"regression_neut(ts_mean(avg_pct_change_estimate_12m_earnings_7d, 66), log(cap))", |
||||
"ts_decay_linear(avg_pct_change_estimate_12m_earnings_7d, 20)", |
||||
"ts_zscore(divide(avg_pct_change_estimate_12m_earnings_7d, add(count_analysts_lower_curr_qtr_earnings_30d, 0.0001)), 126)", |
||||
"group_zscore(ts_mean(avg_pct_change_estimate_12m_earnings_7d, 66), industry)" |
||||
] |
||||
@ -0,0 +1,60 @@ |
||||
|
||||
# parsetab.py |
||||
# This file is automatically generated. Do not edit. |
||||
# pylint: disable=W,C,R |
||||
_tabversion = '3.10' |
||||
|
||||
_lr_method = 'LALR' |
||||
|
||||
_lr_signature = 'ASSIGN BOOLEAN CATEGORY COMMA DIVIDE EQUAL FIELD FUNCTION GREATER GREATEREQUAL IDENTIFIER LESS LESSEQUAL LPAREN MINUS NOTEQUAL NUMBER PLUS RPAREN STRING TIMESexpression : comparison\n | expression EQUAL comparison\n | expression NOTEQUAL comparison\n | expression GREATER comparison\n | expression LESS comparison\n | expression GREATEREQUAL comparison\n | expression LESSEQUAL comparisoncomparison : term\n | comparison PLUS term\n | comparison MINUS termterm : factor\n | term TIMES factor\n | term DIVIDE factorfactor : NUMBER\n | STRING\n | FIELD\n | CATEGORY\n | IDENTIFIER\n | BOOLEAN\n | MINUS factor\n | LPAREN expression RPAREN\n | function_callfunction_call : FUNCTION LPAREN args RPARENargs : arg_list\n | emptyarg_list : arg\n | arg_list COMMA argarg : expression\n | IDENTIFIER ASSIGN expressionempty :' |
||||
|
||||
_lr_action_items = {'NUMBER':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,]),'STRING':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,]),'FIELD':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,]),'CATEGORY':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,]),'IDENTIFIER':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[10,10,10,10,10,10,10,10,10,10,10,10,10,44,44,10,]),'BOOLEAN':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,]),'MINUS':([0,2,3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19,20,21,22,23,24,25,27,28,29,30,31,32,33,34,35,36,37,38,44,45,46,47,],[4,22,-8,4,-11,-14,-15,-16,-17,-18,-19,4,-22,4,4,4,4,4,4,4,4,4,4,-20,4,22,22,22,22,22,22,-9,-10,-12,-13,-21,-18,-23,4,4,]),'LPAREN':([0,4,12,14,15,16,17,18,19,20,21,22,23,24,27,46,47,],[12,12,12,27,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'FUNCTION':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,]),'$end':([1,2,3,5,6,7,8,9,10,11,13,25,28,29,30,31,32,33,34,35,36,37,38,45,],[0,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,-23,]),'EQUAL':([1,2,3,5,6,7,8,9,10,11,13,25,26,28,29,30,31,32,33,34,35,36,37,38,43,44,45,49,],[15,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,15,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,15,-18,-23,15,]),'NOTEQUAL':([1,2,3,5,6,7,8,9,10,11,13,25,26,28,29,30,31,32,33,34,35,36,37,38,43,44,45,49,],[16,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,16,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,16,-18,-23,16,]),'GREATER':([1,2,3,5,6,7,8,9,10,11,13,25,26,28,29,30,31,32,33,34,35,36,37,38,43,44,45,49,],[17,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,17,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,17,-18,-23,17,]),'LESS':([1,2,3,5,6,7,8,9,10,11,13,25,26,28,29,30,31,32,33,34,35,36,37,38,43,44,45,49,],[18,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,18,-2,-3,-4,-5,-6,-7,
-9,-10,-12,-13,-21,18,-18,-23,18,]),'GREATEREQUAL':([1,2,3,5,6,7,8,9,10,11,13,25,26,28,29,30,31,32,33,34,35,36,37,38,43,44,45,49,],[19,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,19,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,19,-18,-23,19,]),'LESSEQUAL':([1,2,3,5,6,7,8,9,10,11,13,25,26,28,29,30,31,32,33,34,35,36,37,38,43,44,45,49,],[20,-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,20,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,20,-18,-23,20,]),'RPAREN':([2,3,5,6,7,8,9,10,11,13,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,48,49,],[-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,38,-30,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,45,-24,-25,-26,-28,-18,-23,-27,-29,]),'COMMA':([2,3,5,6,7,8,9,10,11,13,25,28,29,30,31,32,33,34,35,36,37,38,40,42,43,44,45,48,49,],[-1,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,-2,-3,-4,-5,-6,-7,-9,-10,-12,-13,-21,46,-26,-28,-18,-23,-27,-29,]),'PLUS':([2,3,5,6,7,8,9,10,11,13,25,28,29,30,31,32,33,34,35,36,37,38,44,45,],[21,-8,-11,-14,-15,-16,-17,-18,-19,-22,-20,21,21,21,21,21,21,-9,-10,-12,-13,-21,-18,-23,]),'TIMES':([3,5,6,7,8,9,10,11,13,25,34,35,36,37,38,44,45,],[23,-11,-14,-15,-16,-17,-18,-19,-22,-20,23,23,-12,-13,-21,-18,-23,]),'DIVIDE':([3,5,6,7,8,9,10,11,13,25,34,35,36,37,38,44,45,],[24,-11,-14,-15,-16,-17,-18,-19,-22,-20,24,24,-12,-13,-21,-18,-23,]),'ASSIGN':([44,],[47,]),} |
||||
|
||||
# Expand the compressed LALR action table (_lr_action_items) into the nested
# {token: {state: action}} mapping that PLY's parser consumes at runtime.
# NOTE: this file is generated by PLY (see header) — any edit is lost on
# regeneration; regenerate from the grammar in validator.py instead.
_lr_action = {}
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_action: _lr_action[_x] = {}
        _lr_action[_x][_k] = _y
# Free the compressed form once expanded.
del _lr_action_items
||||
|
||||
_lr_goto_items = {'expression':([0,12,27,46,47,],[1,26,43,43,49,]),'comparison':([0,12,15,16,17,18,19,20,27,46,47,],[2,2,28,29,30,31,32,33,2,2,2,]),'term':([0,12,15,16,17,18,19,20,21,22,27,46,47,],[3,3,3,3,3,3,3,3,34,35,3,3,3,]),'factor':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[5,25,5,5,5,5,5,5,5,5,5,36,37,5,5,5,]),'function_call':([0,4,12,15,16,17,18,19,20,21,22,23,24,27,46,47,],[13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,]),'args':([27,],[39,]),'arg_list':([27,],[40,]),'empty':([27,],[41,]),'arg':([27,46,],[42,48,]),} |
||||
|
||||
# Expand the compressed goto table (_lr_goto_items) into the nested
# {nonterminal: {state: goto_state}} mapping used after each reduction.
# NOTE: auto-generated by PLY — do not hand-edit.
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = {}
        _lr_goto[_x][_k] = _y
# Free the compressed form once expanded.
del _lr_goto_items
||||
_lr_productions = [ |
||||
("S' -> expression","S'",1,None,None,None), |
||||
('expression -> comparison','expression',1,'p_expression','validator.py',405), |
||||
('expression -> expression EQUAL comparison','expression',3,'p_expression','validator.py',406), |
||||
('expression -> expression NOTEQUAL comparison','expression',3,'p_expression','validator.py',407), |
||||
('expression -> expression GREATER comparison','expression',3,'p_expression','validator.py',408), |
||||
('expression -> expression LESS comparison','expression',3,'p_expression','validator.py',409), |
||||
('expression -> expression GREATEREQUAL comparison','expression',3,'p_expression','validator.py',410), |
||||
('expression -> expression LESSEQUAL comparison','expression',3,'p_expression','validator.py',411), |
||||
('comparison -> term','comparison',1,'p_comparison','validator.py',418), |
||||
('comparison -> comparison PLUS term','comparison',3,'p_comparison','validator.py',419), |
||||
('comparison -> comparison MINUS term','comparison',3,'p_comparison','validator.py',420), |
||||
('term -> factor','term',1,'p_term','validator.py',427), |
||||
('term -> term TIMES factor','term',3,'p_term','validator.py',428), |
||||
('term -> term DIVIDE factor','term',3,'p_term','validator.py',429), |
||||
('factor -> NUMBER','factor',1,'p_factor','validator.py',436), |
||||
('factor -> STRING','factor',1,'p_factor','validator.py',437), |
||||
('factor -> FIELD','factor',1,'p_factor','validator.py',438), |
||||
('factor -> CATEGORY','factor',1,'p_factor','validator.py',439), |
||||
('factor -> IDENTIFIER','factor',1,'p_factor','validator.py',440), |
||||
('factor -> BOOLEAN','factor',1,'p_factor','validator.py',441), |
||||
('factor -> MINUS factor','factor',2,'p_factor','validator.py',442), |
||||
('factor -> LPAREN expression RPAREN','factor',3,'p_factor','validator.py',443), |
||||
('factor -> function_call','factor',1,'p_factor','validator.py',444), |
||||
('function_call -> FUNCTION LPAREN args RPAREN','function_call',4,'p_function_call','validator.py',472), |
||||
('args -> arg_list','args',1,'p_args','validator.py',476), |
||||
('args -> empty','args',1,'p_args','validator.py',477), |
||||
('arg_list -> arg','arg_list',1,'p_arg_list','validator.py',484), |
||||
('arg_list -> arg_list COMMA arg','arg_list',3,'p_arg_list','validator.py',485), |
||||
('arg -> expression','arg',1,'p_arg','validator.py',492), |
||||
('arg -> IDENTIFIER ASSIGN expression','arg',3,'p_arg','validator.py',493), |
||||
('empty -> <empty>','empty',0,'p_empty','validator.py',500), |
||||
] |
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1 @@ |
||||
# Alpha Transformer
||||
@ -0,0 +1,15 @@ |
||||
{
    "brain": {
        "username": "your_brain_username",
        "password": "your_brain_password"
    },
    "llm": {
        "api_key": "your_llm_api_key",
        "base_url": "https://api.minimaxi.com/v1",
        "model": "MiniMax-M2.7"
    },
    "transformer": {
        "top_n_datafield": 30,
        "data_type": "MATRIX"
    }
}
||||
@ -0,0 +1,15 @@ |
||||
{ |
||||
"brain": { |
||||
"username": "your_brain_username", |
||||
"password": "your_brain_password" |
||||
}, |
||||
"llm": { |
||||
"api_key": "your_llm_api_key", |
||||
"base_url": "https://api.moonshot.cn/v1", |
||||
"model": "kimi-k2.5" |
||||
}, |
||||
"transformer": { |
||||
"top_n_datafield": 50, |
||||
"data_type": "MATRIX" |
||||
} |
||||
} |
||||
@ -0,0 +1,404 @@ |
||||
# FastAPI 应用主入口 |
||||
from fastapi import FastAPI, Request |
||||
from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse, FileResponse |
||||
from fastapi.staticfiles import StaticFiles |
||||
from fastapi.templating import Jinja2Templates |
||||
import sys |
||||
import os |
||||
import json |
||||
import subprocess |
||||
import uuid |
||||
import asyncio |
||||
import zipfile |
||||
from datetime import datetime |
||||
|
||||
# Create the FastAPI application instance.
app = FastAPI(title="Alpha Transformer", version="1.0.0")
||||
|
||||
# Chrome DevTools 健康检查端点(可选,阻止 404 日志) |
||||
@app.get("/.well-known/appspecific/com.chrome.devtools.json")
async def chrome_devtools_check():
    """Answer Chrome DevTools' well-known probe so it does not log 404s."""
    payload = {"status": "ok"}
    return payload
||||
|
||||
# 提供模板目录中的 CSS 和 JS 文件 |
||||
@app.get("/styles.css")
async def get_styles():
    """Serve the front-end stylesheet from the templates directory.

    Uses the file-level ``FileResponse`` import; the original re-imported
    it locally on every request, which was redundant.
    """
    return FileResponse("templates/styles.css")
||||
|
||||
@app.get("/app.js")
async def get_app_js():
    """Serve the front-end JavaScript bundle from the templates directory.

    Uses the file-level ``FileResponse`` import; the original re-imported
    it locally on every request, which was redundant.
    """
    return FileResponse("templates/app.js")
||||
|
||||
# Configure the Jinja2 template engine used to render HTML pages.
templates = Jinja2Templates(directory="templates")

# Registry of running tasks (task_id -> task state).
transformer_tasks = {}

# Global configuration, populated from config.json at startup (see load_config).
app_config = {}
||||
|
||||
def load_config():
    """Load ``config.json`` located next to this file.

    Returns:
        dict: the parsed configuration, or an empty dict when the file is
        missing or cannot be read/parsed.
    """
    config_path = os.path.join(os.path.dirname(__file__), 'config.json')
    # Guard clause: nothing to load if the file is absent.
    if not os.path.exists(config_path):
        print(f"⚠ 配置文件不存在: {config_path}")
        return {}
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            loaded = json.load(f)
    except Exception as e:
        print(f"⚠ 加载配置文件失败: {e}")
        return {}
    print(f"✓ 已加载配置文件: {config_path}")
    return loaded
||||
|
||||
# Load configuration once at startup.
app_config = load_config()
||||
|
||||
|
||||
@app.get("/", response_class=HTMLResponse)
async def home():
    """Return the front-end landing page (static HTML template)."""
    with open("templates/index.html", "r", encoding="utf-8") as f:
        page = f.read()
    return page
||||
|
||||
|
||||
@app.get("/api/config/defaults")
async def get_config_defaults():
    """Expose the loaded config.json contents as form default values."""
    payload = {"success": True, "config": app_config}
    return JSONResponse(content=payload)
||||
|
||||
|
||||
@app.post("/api/config/save")
async def save_config(request: Request):
    """Persist user-edited configuration back to config.json.

    Also refreshes the in-memory ``app_config`` so later requests see the
    new values without a restart. Returns a 500 JSON error on failure.
    """
    global app_config
    try:
        data = await request.json()

        # Update the in-memory copy first, then write it to disk.
        app_config = data

        config_path = os.path.join(os.path.dirname(__file__), 'config.json')
        with open(config_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=4, ensure_ascii=False)

        return JSONResponse(content={
            "success": True,
            "message": "配置已保存"
        })
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"success": False, "error": str(e)}
        )
||||
|
||||
|
||||
@app.post("/api/generate")
async def generate_alpha(request: Request):
    """
    API endpoint that generates alpha variants.

    Receives the front-end form data, writes a per-task config file, runs
    the Transformer script as a blocking subprocess, and returns its
    collected output files in the JSON response.
    """
    print("=" * 50)
    print("收到生成变种请求")

    try:
        # Parse the JSON request body.
        data = await request.json()
        print(f"请求数据: alpha_id={data.get('alpha_id')}, llm_model={data.get('llm_model')}")

        # Unique task id, used only to name the temporary config file.
        task_id = str(uuid.uuid4())
        print(f"生成任务 ID: {task_id}")

        # Fields the front end must supply.
        required_fields = [
            "alpha_id",
            "llm_api_key",
            "llm_base_url",
            "llm_model",
            "brain_username",
            "brain_password"
        ]

        # Reject the request early if any required field is missing or empty.
        for field in required_fields:
            if not data.get(field):
                print(f"缺少必填字段: {field}")
                return JSONResponse(
                    status_code=400,
                    content={"success": False, "error": f"Missing required field: {field}"}
                )

        # Locate the Transformer sub-directory next to this file.
        # NOTE(review): the spelling 'Tranformer' (sic) matches the directory
        # name used elsewhere in this app — do not "fix" it here alone.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        print(f"Transformer 目录: {transformer_dir}")

        # Build the config handed to the Transformer script.
        config = {
            "LLM_model_name": data.get('llm_model'),
            "LLM_API_KEY": data.get('llm_api_key'),
            "llm_base_url": data.get('llm_base_url'),
            "username": data.get('brain_username'),
            "password": data.get('brain_password'),
            "alpha_id": data.get('alpha_id'),
            "top_n_datafield": int(data.get('top_n_datafield', 50)),
            "user_region": data.get('user_region'),
            "user_universe": data.get('user_universe'),
            "user_delay": int(data.get('user_delay')) if data.get('user_delay') else None,
            "user_category": data.get('user_category'),
            "user_data_type": data.get('user_data_type', 'MATRIX')
        }
        print(f"配置已构建: LLM_model={config['LLM_model_name']}, alpha_id={config['alpha_id']}")

        # Write the config to a per-task temporary JSON file for the script
        # to read (it contains credentials; cleaned up in the finally below).
        config_path = os.path.join(transformer_dir, f'config_{task_id}.json')
        with open(config_path, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=4)
        print(f"配置文件已写入: {config_path}")

        try:
            # Run Transformer.py synchronously; blocks up to 600 seconds.
            print(f"启动 Transformer 脚本...")
            process = subprocess.run(
                [sys.executable, '-u', os.path.join(transformer_dir, 'Transformer.py'), config_path],
                cwd=transformer_dir,
                capture_output=True,
                text=True,
                timeout=600,
                env={**os.environ, "PYTHONIOENCODING": "utf-8"}
            )
            print(f"Transformer 脚本执行完成,返回码: {process.returncode}")

            # Output files produced by the script.
            # NOTE(review): these paths are fixed, not task-scoped — concurrent
            # requests would read/overwrite each other's results; confirm this
            # endpoint is only ever invoked serially.
            output_file = os.path.join(transformer_dir, 'output', 'Alpha_generated_expressions_success.json')
            candidates_file = os.path.join(transformer_dir, 'output', 'Alpha_candidates.json')
            error_file = os.path.join(transformer_dir, 'output', 'Alpha_generated_expressions_error.json')

            # Assemble the response payload, including the subprocess output.
            result = {
                "success": True,
                "alpha_id": data.get('alpha_id'),
                "stdout": process.stdout,
                "stderr": process.stderr,
                "return_code": process.returncode
            }

            # Successfully generated expressions (empty list if absent).
            if os.path.exists(output_file):
                print(f"读取成功表达式文件: {output_file}")
                with open(output_file, 'r', encoding='utf-8') as f:
                    result['expressions_success'] = json.load(f)
            else:
                print(f"成功表达式文件不存在: {output_file}")
                result['expressions_success'] = []

            # Candidate expressions (empty list if absent).
            if os.path.exists(candidates_file):
                print(f"读取候选表达式文件: {candidates_file}")
                with open(candidates_file, 'r', encoding='utf-8') as f:
                    result['candidates'] = json.load(f)
            else:
                print(f"候选表达式文件不存在: {candidates_file}")
                result['candidates'] = []

            # Expressions that failed generation (empty list if absent).
            if os.path.exists(error_file):
                print(f"读取错误表达式文件: {error_file}")
                with open(error_file, 'r', encoding='utf-8') as f:
                    result['expressions_error'] = json.load(f)
            else:
                print(f"错误表达式文件不存在: {error_file}")
                result['expressions_error'] = []

            print(f"成功: {len(result['expressions_success'])} 个, 候选: {len(result['candidates'])} 个, 错误: {len(result['expressions_error'])} 个")
            print("=" * 50)
            return JSONResponse(content=result)

        finally:
            # Always remove the per-task config file — it holds credentials.
            if os.path.exists(config_path):
                os.remove(config_path)
                print(f"已清理临时配置文件: {config_path}")

    except subprocess.TimeoutExpired:
        print("任务执行超时 (600秒)")
        return JSONResponse(
            status_code=408,
            content={"success": False, "error": "Task timeout (600s)"}
        )
    except Exception as e:
        print(f"执行异常: {str(e)}")
        return JSONResponse(
            status_code=500,
            content={"success": False, "error": str(e)}
        )
||||
|
||||
|
||||
@app.post("/api/transformer/login-and-fetch-options") |
||||
async def login_and_fetch_options(request: Request): |
||||
""" |
||||
登录 BRAIN 并获取地区、Delay、Universe、类别等选项 |
||||
用于填充高级选项表单 |
||||
""" |
||||
try: |
||||
data = await request.json() |
||||
username = data.get('username') |
||||
password = data.get('password') |
||||
|
||||
if not username or not password: |
||||
return JSONResponse( |
||||
status_code=400, |
||||
content={'success': False, 'error': 'Username and password are required'} |
||||
) |
||||
|
||||
# 添加 Transformer 目录到 sys.path |
||||
script_dir = os.path.dirname(os.path.abspath(__file__)) |
||||
transformer_dir = os.path.join(script_dir, 'Tranformer') |
||||
if transformer_dir not in sys.path: |
||||
sys.path.append(transformer_dir) |
||||
|
||||
# 导入必要的模块 |
||||
from Tranformer.ace_lib import SingleSession, get_instrument_type_region_delay |
||||
import pandas as pd |
||||
|
||||
# 创建新的会话实例 |
||||
session = SingleSession() |
||||
session.auth = (username, password) |
||||
|
||||
brain_api_url = "https://api.worldquantbrain.com" |
||||
response = session.post(brain_api_url + "/authentication") |
||||
|
||||
if response.status_code == 201: |
||||
# 认证成功 |
||||
pass |
||||
elif response.status_code == 401: |
||||
return JSONResponse( |
||||
status_code=401, |
||||
content={'success': False, 'error': 'Authentication failed: Invalid credentials'} |
||||
) |
||||
else: |
||||
return JSONResponse( |
||||
status_code=400, |
||||
content={'success': False, 'error': f'Authentication failed: {response.status_code}'} |
||||
) |
||||
|
||||
# 获取 region/delay/universe 选项 |
||||
df = get_instrument_type_region_delay(session) |
||||
|
||||
# 获取数据类别 |
||||
categories_resp = session.get(brain_api_url + "/data-categories") |
||||
categories = [] |
||||
if categories_resp.status_code == 200: |
||||
categories_data = categories_resp.json() |
||||
if isinstance(categories_data, list): |
||||
categories = categories_data |
||||
elif isinstance(categories_data, dict): |
||||
categories = categories_data.get('results', []) |
||||
|
||||
# 转换 DataFrame 为前端需要的嵌套字典结构 |
||||
# 结构: Region -> Delay -> Universe |
||||
df_equity = df[df['InstrumentType'] == 'EQUITY'] |
||||
|
||||
options = {} |
||||
for _, row in df_equity.iterrows(): |
||||
region = row['Region'] |
||||
delay = row['Delay'] |
||||
universes = row['Universe'] # 这是一个列表 |
||||
|
||||
if region not in options: |
||||
options[region] = {} |
||||
|
||||
# 将 delay 转换为字符串作为字典的键 |
||||
delay_str = str(delay) |
||||
if delay_str not in options[region]: |
||||
options[region][delay_str] = universes |
||||
|
||||
return JSONResponse(content={ |
||||
'success': True, |
||||
'options': options, |
||||
'categories': categories |
||||
}) |
||||
|
||||
except Exception as e: |
||||
print(f"登录获取选项失败: {str(e)}") |
||||
return JSONResponse( |
||||
status_code=500, |
||||
content={'success': False, 'error': str(e)} |
||||
) |
||||
|
||||
|
||||
@app.get("/api/health") |
||||
async def health_check(): |
||||
"""健康检查端点,用于验证服务是否正常运行""" |
||||
return {"status": "healthy", "service": "alpha-transformer"} |
||||
|
||||
|
||||
@app.get("/api/download/{alpha_id}") |
||||
async def download_results(alpha_id: str): |
||||
""" |
||||
下载生成结果的 zip 压缩包 |
||||
包含三个 JSON 文件:success, candidates, error |
||||
""" |
||||
try: |
||||
transformer_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Tranformer') |
||||
output_dir = os.path.join(transformer_dir, 'output') |
||||
|
||||
# 检查文件是否存在 |
||||
files_to_zip = { |
||||
'Alpha_generated_expressions_success.json': os.path.join(output_dir, 'Alpha_generated_expressions_success.json'), |
||||
'Alpha_candidates.json': os.path.join(output_dir, 'Alpha_candidates.json'), |
||||
'Alpha_generated_expressions_error.json': os.path.join(output_dir, 'Alpha_generated_expressions_error.json') |
||||
} |
||||
|
||||
# 生成时间戳 |
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") |
||||
zip_filename = f"{alpha_id}_{timestamp}.zip" |
||||
zip_path = os.path.join(output_dir, zip_filename) |
||||
|
||||
# 创建 zip 文件 |
||||
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: |
||||
for arcname, filepath in files_to_zip.items(): |
||||
if os.path.exists(filepath): |
||||
zipf.write(filepath, arcname) |
||||
|
||||
# 返回文件 |
||||
return FileResponse( |
||||
zip_path, |
||||
media_type='application/zip', |
||||
filename=zip_filename |
||||
) |
||||
|
||||
except Exception as e: |
||||
return JSONResponse( |
||||
status_code=500, |
||||
content={"success": False, "error": str(e)} |
||||
) |
||||
|
||||
|
||||
if __name__ == "__main__": |
||||
# 启动 FastAPI 应用,使用 uvicorn 作为 ASGI 服务器 |
||||
import uvicorn |
||||
uvicorn.run(app, host="0.0.0.0", port=8000) |
||||
@ -0,0 +1,7 @@ |
||||
fastapi>=0.100.0 |
||||
uvicorn>=0.20.0 |
||||
requests>=2.28.0 |
||||
openai>=1.0.0 |
||||
pandas>=2.0.0 |
||||
pydantic>=2.0.0 |
||||
jinja2>=3.0.0 |
||||
File diff suppressed because one or more lines are too long
@ -0,0 +1,290 @@ |
||||
// On page load, prefill the form with server-side defaults (if configured).
document.addEventListener('DOMContentLoaded', async () => {
    // Assign a field only when the backend actually supplied a value.
    const setField = (id, value) => {
        if (value) {
            document.getElementById(id).value = value;
        }
    };

    try {
        const response = await fetch('/api/config/defaults');
        const result = await response.json();

        if (result.success && result.config) {
            const { brain, llm, transformer } = result.config;

            if (brain) {
                setField('brainUsername', brain.username);
                setField('brainPassword', brain.password);
            }

            if (llm) {
                setField('llmApiKey', llm.api_key);
                setField('llmBaseUrl', llm.base_url);
                setField('llmModel', llm.model);
            }

            if (transformer) {
                setField('topNDatafield', transformer.top_n_datafield);
                setField('dataType', transformer.data_type);
            }
        }
    } catch (error) {
        console.error('加载默认配置失败:', error);
    }
});
||||
|
||||
// Cached references to the form controls used throughout this script.
const form = document.getElementById('transformerForm');
const submitBtn = document.getElementById('submitBtn');
const downloadBtn = document.getElementById('downloadBtn');
const loginAndFetchBtn = document.getElementById('loginAndFetchBtn');
const regionSelect = document.getElementById('region');
const delaySelect = document.getElementById('delay');
const universeSelect = document.getElementById('universe');
const dataTypeSelect = document.getElementById('dataType');
const categoryButtons = document.getElementById('category-buttons');

// Region -> Delay -> [Universe] mapping, populated after BRAIN login.
let optionsData = {};
||||
|
||||
// Warn before switching to VECTOR: the seed Alpha must use vector operators,
// otherwise generation is very likely to hit data-type errors.
dataTypeSelect.addEventListener('change', function() {
    if (this.value !== 'VECTOR') {
        return;
    }
    const confirmed = confirm("请确保您输入的原型Alpha中正确地使用了vector operator,否则极容易造成数据类型错误!");
    if (!confirmed) {
        // User backed out; revert to the safe default.
        this.value = 'MATRIX';
    }
});
||||
|
||||
// Authenticate against BRAIN and load region/delay/universe/category options.
loginAndFetchBtn.addEventListener('click', async () => {
    const username = document.getElementById('brainUsername').value.trim();
    const password = document.getElementById('brainPassword').value;

    if (!username || !password) {
        alert('请先填写BRAIN用户名和密码');
        return;
    }

    // Re-enable the button and restore its label after a failure.
    const restoreButton = () => {
        loginAndFetchBtn.disabled = false;
        loginAndFetchBtn.textContent = '登录BRAIN并获取选项';
    };

    loginAndFetchBtn.disabled = true;
    loginAndFetchBtn.textContent = '正在登录...';

    try {
        const response = await fetch('/api/transformer/login-and-fetch-options', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ username, password })
        });
        const result = await response.json();

        if (!result.success) {
            alert('登录失败: ' + result.error);
            restoreButton();
            return;
        }

        optionsData = result.options;
        populateRegionSelect();
        regionSelect.disabled = false;

        if (result.categories) {
            populateCategoryButtons(result.categories);
        }

        // Deliberately left disabled so the user does not log in twice.
        loginAndFetchBtn.textContent = '登录成功';
    } catch (error) {
        alert('登录出错: ' + error.message);
        restoreButton();
    }
});
||||
|
||||
// Fill the region <select> from optionsData, keeping only its placeholder.
function populateRegionSelect() {
    // Remove every option after the first (the "-- 登录后获取选项 --" placeholder).
    while (regionSelect.options.length > 1) {
        regionSelect.remove(1);
    }

    for (const region of Object.keys(optionsData)) {
        const opt = document.createElement('option');
        opt.value = region;
        opt.textContent = region;
        regionSelect.appendChild(opt);
    }
}
||||
|
||||
// Append one toggle button per data category.
// Each category may be an {id, name} object or a plain string.
function populateCategoryButtons(categories) {
    for (const category of categories) {
        const button = document.createElement('button');
        button.type = 'button';
        button.dataset.value = category.id || category;
        button.textContent = category.name || category;
        button.onclick = function() { toggleCategory(this); };
        button.className = 'btn';
        button.style.cssText = 'padding: 4px 12px; font-size: 11px;';
        categoryButtons.appendChild(button);
    }
}
||||
|
||||
// Toggle a category button's selected state.
// NOTE: selection state is encoded in the inline backgroundColor itself —
// '#000080' (navy, read back by the browser as 'rgb(0, 0, 128)') means
// selected, '#c0c0c0' means unselected. The "cat-all" button is mutually
// exclusive with the individual category buttons.
function toggleCategory(btn) {
    const allBtn = document.getElementById('cat-all');
    const isAllBtn = (btn === allBtn);

    if (isAllBtn) {
        // Selecting "all" deselects every individual category button.
        allBtn.style.backgroundColor = '#000080';
        allBtn.style.color = 'white';

        const otherBtns = categoryButtons.querySelectorAll('button:not(#cat-all)');
        otherBtns.forEach(b => {
            b.style.backgroundColor = '#c0c0c0';
            b.style.color = 'black';
        });
    } else {
        // Flip this button's selected state.
        if (btn.style.backgroundColor === 'rgb(0, 0, 128)') {
            btn.style.backgroundColor = '#c0c0c0';
            btn.style.color = 'black';
        } else {
            btn.style.backgroundColor = '#000080';
            btn.style.color = 'white';
        }

        // If any individual button is now selected, turn "all" off;
        // otherwise fall back to "all" selected.
        const anySelected = categoryButtons.querySelectorAll('button:not(#cat-all)');
        let hasSelected = false;
        anySelected.forEach(b => {
            if (b.style.backgroundColor === 'rgb(0, 0, 128)') {
                hasSelected = true;
            }
        });

        if (hasSelected) {
            allBtn.style.backgroundColor = '#c0c0c0';
            allBtn.style.color = 'black';
        } else {
            allBtn.style.backgroundColor = '#000080';
            allBtn.style.color = 'white';
        }
    }
}
||||
|
||||
// When the region changes, repopulate the delay list and reset the universe list.
regionSelect.addEventListener('change', () => {
    const selectedRegion = regionSelect.value;

    // Reset both dependent selects to their placeholders.
    delaySelect.innerHTML = '<option value="">-- 先选择地区 --</option>';
    universeSelect.innerHTML = '<option value="">-- 先选择Delay --</option>';
    delaySelect.disabled = true;
    universeSelect.disabled = true;

    const delayMap = selectedRegion ? optionsData[selectedRegion] : null;
    if (!delayMap) {
        return;
    }

    for (const delay of Object.keys(delayMap)) {
        const opt = document.createElement('option');
        opt.value = delay;
        opt.textContent = delay;
        delaySelect.appendChild(opt);
    }
    delaySelect.disabled = false;
});
||||
|
||||
// When the delay changes, repopulate the universe list.
delaySelect.addEventListener('change', () => {
    const selectedRegion = regionSelect.value;
    const selectedDelay = delaySelect.value;

    universeSelect.innerHTML = '<option value="">-- 先选择Delay --</option>';
    universeSelect.disabled = true;

    // Guard each level explicitly: previously optionsData[selectedRegion]
    // was dereferenced unchecked, throwing a TypeError when the region key
    // was missing (e.g. after options were refreshed).
    const universes = selectedRegion && selectedDelay &&
        optionsData[selectedRegion] && optionsData[selectedRegion][selectedDelay];
    if (!universes) {
        return;
    }

    universes.forEach(universe => {
        const option = document.createElement('option');
        option.value = universe;
        option.textContent = universe;
        universeSelect.appendChild(option);
    });
    universeSelect.disabled = false;
});
||||
|
||||
// Collect the form, POST to /api/generate, and surface the result counts.
form.addEventListener('submit', async (e) => {
    e.preventDefault();

    // Base payload; optional fields are added below only when set.
    const formData = {
        alpha_id: document.getElementById('alphaId').value.trim(),
        llm_api_key: document.getElementById('llmApiKey').value.trim(),
        llm_base_url: document.getElementById('llmBaseUrl').value.trim(),
        llm_model: document.getElementById('llmModel').value.trim(),
        brain_username: document.getElementById('brainUsername').value.trim(),
        // Do NOT trim the password: the login handler sends it untrimmed, so
        // trimming here could make generation fail for passwords that contain
        // leading/trailing spaces yet logged in successfully.
        brain_password: document.getElementById('brainPassword').value,
        top_n_datafield: parseInt(document.getElementById('topNDatafield').value, 10) || 50,
        data_type: document.getElementById('dataType').value || 'MATRIX'
    };

    const region = document.getElementById('region').value;
    const delay = document.getElementById('delay').value;
    const universe = document.getElementById('universe').value;

    if (region) formData.user_region = region;
    if (delay) formData.user_delay = parseInt(delay, 10);
    if (universe) formData.user_universe = universe;

    // Collect category selection; omit it entirely when "不筛选" (all) is active.
    // Selected buttons are detected by their navy background (see toggleCategory).
    const allBtn = document.getElementById('cat-all');
    let selectedCategories = [];

    if (allBtn.style.backgroundColor !== 'rgb(0, 0, 128)') {
        const categoryBtns = categoryButtons.querySelectorAll('button:not(#cat-all)');
        categoryBtns.forEach(btn => {
            if (btn.style.backgroundColor === 'rgb(0, 0, 128)') {
                selectedCategories.push(btn.dataset.value);
            }
        });

        if (selectedCategories.length > 0) {
            formData.user_category = selectedCategories;
        }
    }

    submitBtn.disabled = true;
    submitBtn.textContent = '处理中...';

    try {
        const response = await fetch('/api/generate', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify(formData)
        });

        const result = await response.json();

        if (result.success) {
            const successCount = result.expressions_success ? result.expressions_success.length : 0;
            const candidateCount = result.candidates ? result.candidates.length : 0;
            const errorCount = result.expressions_error ? result.expressions_error.length : 0;
            alert('生成完成!成功: ' + successCount + ' 个, 候选: ' + candidateCount + ' 个, 错误: ' + errorCount + ' 个');

            // Reveal the download button, wired to the current alpha id.
            downloadBtn.style.display = 'block';
            downloadBtn.onclick = function() {
                const alphaId = document.getElementById('alphaId').value.trim();
                window.location.href = '/api/download/' + alphaId;
            };
        } else {
            alert('生成失败: ' + (result.error || '未知错误'));
            downloadBtn.style.display = 'none';
        }

    } catch (error) {
        alert('请求失败: ' + error.message);
        downloadBtn.style.display = 'none';
    } finally {
        submitBtn.disabled = false;
        submitBtn.textContent = '生成变种';
    }
});
||||
@ -0,0 +1,148 @@ |
||||
<!DOCTYPE html> |
||||
<html lang="zh-CN"> |
||||
<head> |
||||
<meta charset="UTF-8"> |
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0"> |
||||
    <title>Alpha Transformer</title> |
||||
<link rel="stylesheet" href="/styles.css"> |
||||
</head> |
||||
<body> |
||||
<div class="container"> |
||||
<div class="header"> |
||||
<h1>Alpha Transformer</h1> |
||||
            <p>AI驱动的Alpha变种生成器</p> |
||||
</div> |
||||
|
||||
<div class="content"> |
||||
<div class="instructions"> |
||||
<h3>使用说明</h3> |
||||
<ul> |
||||
<li>输入您的种子Alpha ID,系统将基于BRAIN平台生成多个变种</li> |
||||
<li>配置您的LLM(支持OpenAI兼容接口)和BRAIN凭证</li> |
||||
<li>点击生成按钮,系统将自动完成所有处理并返回结果</li> |
||||
</ul> |
||||
</div> |
||||
|
||||
<form id="transformerForm"> |
||||
<div class="form-section"> |
||||
<h2>BRAIN 凭证</h2> |
||||
|
||||
<div class="form-row"> |
||||
<div class="form-group"> |
||||
<label for="brainUsername">用户名 *</label> |
||||
<input type="text" id="brainUsername" name="brain_username" required |
||||
placeholder="BRAIN平台用户名"> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label for="brainPassword">密码 *</label> |
||||
<input type="password" id="brainPassword" name="brain_password" required |
||||
placeholder="BRAIN平台密码"> |
||||
</div> |
||||
</div> |
||||
|
||||
<button type="button" class="btn" id="loginAndFetchBtn" style="margin-top: 10px;"> |
||||
登录BRAIN并获取选项 |
||||
</button> |
||||
</div> |
||||
|
||||
<div class="form-section"> |
||||
<h2>基本信息</h2> |
||||
|
||||
<div class="form-group"> |
||||
<label for="alphaId">Alpha ID *</label> |
||||
<input type="text" id="alphaId" name="alpha_id" required |
||||
placeholder="例如: ak2YPVxv"> |
||||
</div> |
||||
</div> |
||||
|
||||
<div class="form-section"> |
||||
<h2>LLM 配置</h2> |
||||
|
||||
<div class="form-row"> |
||||
<div class="form-group"> |
||||
<label for="llmApiKey">API Key *</label> |
||||
<input type="password" id="llmApiKey" name="llm_api_key" required |
||||
placeholder="您的LLM API密钥"> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label for="llmBaseUrl">Base URL *</label> |
||||
<input type="text" id="llmBaseUrl" name="llm_base_url" required |
||||
value="https://api.moonshot.cn/v1" |
||||
placeholder="例如: https://api.moonshot.cn/v1"> |
||||
</div> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label for="llmModel">模型名称 *</label> |
||||
<input type="text" id="llmModel" name="llm_model" required |
||||
value="kimi-k2.5" |
||||
placeholder="例如: kimi-k2.5, gpt-4"> |
||||
</div> |
||||
</div> |
||||
|
||||
<div class="form-section"> |
||||
<h2>高级选项(可选)</h2> |
||||
|
||||
<div class="form-group"> |
||||
<label for="topNDatafield">Datafield Top N (候选数量,建议低于50)</label> |
||||
<input type="number" id="topNDatafield" name="top_n_datafield" |
||||
value="50" min="1" max="100"> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label for="dataType">数据类型 (Data Type)</label> |
||||
<select id="dataType" name="data_type"> |
||||
<option value="MATRIX" selected>MATRIX</option> |
||||
<option value="VECTOR">VECTOR</option> |
||||
</select> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
                        <label for="region">目标地区 (可选 - 留空则使用种子Alpha的默认值)</label> |
||||
<select id="region" name="region" disabled> |
||||
<option value="">-- 登录后获取选项 --</option> |
||||
</select> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label for="delay">目标Delay (可选 - 留空则使用种子Alpha的默认值)</label> |
||||
<select id="delay" name="delay" disabled> |
||||
<option value="">-- 先选择地区 --</option> |
||||
</select> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label for="universe">目标股票池 (可选 - 留空则使用种子Alpha的默认值)</label> |
||||
<select id="universe" name="universe" disabled> |
||||
<option value="">-- 先选择Delay --</option> |
||||
</select> |
||||
</div> |
||||
|
||||
<div class="form-group"> |
||||
<label>目标数据类别 (可多选)</label> |
||||
<div id="category-buttons"> |
||||
<button type="button" class="btn" id="cat-all" data-value="" onclick="toggleCategory(this)"> |
||||
不筛选 (默认) |
||||
</button> |
||||
</div> |
||||
</div> |
||||
</div> |
||||
|
||||
<button type="submit" class="btn submit-btn" id="submitBtn"> |
||||
生成变种 |
||||
</button> |
||||
|
||||
<button type="button" class="btn download-btn" id="downloadBtn" style="display: none; margin-top: 10px; background: linear-gradient(135deg, var(--monokai-cyan) 0%, #4fc3f7 100%);"> |
||||
下载结果 (ZIP) |
||||
</button> |
||||
</form> |
||||
|
||||
|
||||
</div> |
||||
</div> |
||||
|
||||
<script src="/app.js"></script> |
||||
</body> |
||||
</html> |
||||
@ -0,0 +1,343 @@ |
||||
/* Monokai Theme for Alpha Transformer */ |
||||
|
||||
* { |
||||
margin: 0; |
||||
padding: 0; |
||||
box-sizing: border-box; |
||||
} |
||||
|
||||
/* Monokai Color Palette */ |
||||
:root { |
||||
--monokai-bg: #272822; |
||||
--monokai-fg: #f8f8f2; |
||||
--monokai-comment: #75715e; |
||||
--monokai-red: #f92672; |
||||
--monokai-orange: #fd971f; |
||||
--monokai-yellow: #e6db74; |
||||
--monokai-green: #a6e22e; |
||||
--monokai-cyan: #66d9ef; |
||||
--monokai-blue: #268bd2; |
||||
--monokai-purple: #ae81ff; |
||||
--monokai-dark: #1e1f1c; |
||||
--monokai-border: #3e3d32; |
||||
} |
||||
|
||||
body { |
||||
font-family: 'Consolas', 'Monaco', 'Courier New', monospace; |
||||
font-size: 14px; |
||||
background: var(--monokai-dark); |
||||
min-height: 100vh; |
||||
padding: 20px; |
||||
color: var(--monokai-fg); |
||||
} |
||||
|
||||
.container { |
||||
max-width: 900px; |
||||
margin: 0 auto; |
||||
background: var(--monokai-bg); |
||||
border: 2px solid var(--monokai-border); |
||||
border-radius: 8px; |
||||
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.5); |
||||
} |
||||
|
||||
.header { |
||||
background: linear-gradient(135deg, var(--monokai-dark) 0%, var(--monokai-bg) 100%); |
||||
color: var(--monokai-green); |
||||
padding: 20px; |
||||
font-weight: bold; |
||||
font-size: 18px; |
||||
border-bottom: 2px solid var(--monokai-border); |
||||
border-radius: 8px 8px 0 0; |
||||
} |
||||
|
||||
.header h1 { |
||||
color: var(--monokai-green); |
||||
text-shadow: 0 0 10px rgba(166, 226, 46, 0.3); |
||||
} |
||||
|
||||
.header p { |
||||
color: var(--monokai-comment); |
||||
font-size: 14px; |
||||
margin-top: 5px; |
||||
} |
||||
|
||||
.content { |
||||
padding: 20px; |
||||
} |
||||
|
||||
.form-section { |
||||
margin-bottom: 20px; |
||||
background: var(--monokai-dark); |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 6px; |
||||
padding: 20px; |
||||
} |
||||
|
||||
.form-section h2 { |
||||
color: var(--monokai-cyan); |
||||
margin-bottom: 15px; |
||||
padding-bottom: 10px; |
||||
border-bottom: 1px solid var(--monokai-border); |
||||
font-size: 16px; |
||||
font-weight: bold; |
||||
} |
||||
|
||||
.form-group { |
||||
margin-bottom: 15px; |
||||
} |
||||
|
||||
.form-group label { |
||||
display: block; |
||||
margin-bottom: 8px; |
||||
color: var(--monokai-orange); |
||||
font-weight: normal; |
||||
font-size: 13px; |
||||
} |
||||
|
||||
.form-group input, |
||||
.form-group select { |
||||
width: 100%; |
||||
padding: 10px 12px; |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 4px; |
||||
font-family: 'Consolas', 'Monaco', 'Courier New', monospace; |
||||
font-size: 13px; |
||||
background: var(--monokai-bg); |
||||
color: var(--monokai-fg); |
||||
transition: all 0.3s ease; |
||||
} |
||||
|
||||
.form-group input:focus, |
||||
.form-group select:focus { |
||||
outline: none; |
||||
border-color: var(--monokai-cyan); |
||||
box-shadow: 0 0 0 2px rgba(102, 217, 239, 0.2); |
||||
} |
||||
|
||||
.form-group input::placeholder { |
||||
color: var(--monokai-comment); |
||||
} |
||||
|
||||
.form-row { |
||||
display: grid; |
||||
grid-template-columns: 1fr 1fr; |
||||
gap: 15px; |
||||
} |
||||
|
||||
.btn { |
||||
padding: 10px 20px; |
||||
background: var(--monokai-purple); |
||||
color: var(--monokai-bg); |
||||
border: none; |
||||
border-radius: 4px; |
||||
font-family: 'Consolas', 'Monaco', 'Courier New', monospace; |
||||
font-size: 13px; |
||||
font-weight: bold; |
||||
cursor: pointer; |
||||
transition: all 0.3s ease; |
||||
} |
||||
|
||||
.btn:hover { |
||||
background: #c5a3ff; |
||||
transform: translateY(-1px); |
||||
box-shadow: 0 4px 12px rgba(174, 129, 255, 0.3); |
||||
} |
||||
|
||||
.btn:active { |
||||
transform: translateY(0); |
||||
} |
||||
|
||||
.btn:disabled { |
||||
background: var(--monokai-comment); |
||||
color: var(--monokai-dark); |
||||
cursor: not-allowed; |
||||
transform: none; |
||||
box-shadow: none; |
||||
} |
||||
|
||||
.submit-btn { |
||||
width: 100%; |
||||
padding: 14px 28px; |
||||
background: linear-gradient(135deg, var(--monokai-green) 0%, #8bc34a 100%); |
||||
color: var(--monokai-bg); |
||||
border: none; |
||||
border-radius: 6px; |
||||
font-size: 15px; |
||||
font-weight: bold; |
||||
margin-top: 10px; |
||||
text-transform: uppercase; |
||||
letter-spacing: 1px; |
||||
} |
||||
|
||||
.submit-btn:hover { |
||||
background: linear-gradient(135deg, #b8e068 0%, #9ccc65 100%); |
||||
box-shadow: 0 4px 15px rgba(166, 226, 46, 0.4); |
||||
} |
||||
|
||||
.instructions { |
||||
background: var(--monokai-dark); |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 6px; |
||||
padding: 20px; |
||||
margin-bottom: 20px; |
||||
} |
||||
|
||||
.instructions h3 { |
||||
color: var(--monokai-yellow); |
||||
margin-bottom: 12px; |
||||
font-size: 15px; |
||||
font-weight: bold; |
||||
} |
||||
|
||||
.instructions ul { |
||||
color: var(--monokai-fg); |
||||
padding-left: 25px; |
||||
} |
||||
|
||||
.instructions li { |
||||
margin-bottom: 8px; |
||||
line-height: 1.6; |
||||
} |
||||
|
||||
.instructions li::marker { |
||||
color: var(--monokai-red); |
||||
} |
||||
|
||||
#category-buttons { |
||||
display: flex; |
||||
flex-wrap: wrap; |
||||
gap: 8px; |
||||
padding: 12px; |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 4px; |
||||
background: var(--monokai-bg); |
||||
} |
||||
|
||||
/* Loading animation */ |
||||
.loading { |
||||
display: none; |
||||
text-align: center; |
||||
padding: 40px; |
||||
background: var(--monokai-dark); |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 6px; |
||||
margin-top: 20px; |
||||
} |
||||
|
||||
.loading.active { |
||||
display: block; |
||||
} |
||||
|
||||
.spinner { |
||||
border: 3px solid var(--monokai-border); |
||||
border-top: 3px solid var(--monokai-cyan); |
||||
border-radius: 50%; |
||||
width: 40px; |
||||
height: 40px; |
||||
animation: spin 1s linear infinite; |
||||
margin: 0 auto 15px; |
||||
} |
||||
|
||||
@keyframes spin { |
||||
0% { transform: rotate(0deg); } |
||||
100% { transform: rotate(360deg); } |
||||
} |
||||
|
||||
/* Error message */ |
||||
.error-message { |
||||
background: rgba(249, 38, 114, 0.1); |
||||
color: var(--monokai-red); |
||||
padding: 15px; |
||||
margin-top: 15px; |
||||
border: 1px solid var(--monokai-red); |
||||
border-radius: 4px; |
||||
display: none; |
||||
font-weight: bold; |
||||
} |
||||
|
||||
.error-message.active { |
||||
display: block; |
||||
} |
||||
|
||||
/* Result section */ |
||||
.result-section { |
||||
margin-top: 20px; |
||||
display: none; |
||||
} |
||||
|
||||
.result-section.active { |
||||
display: block; |
||||
} |
||||
|
||||
.result-header { |
||||
display: flex; |
||||
justify-content: space-between; |
||||
align-items: center; |
||||
margin-bottom: 10px; |
||||
padding: 12px; |
||||
background: var(--monokai-dark); |
||||
color: var(--monokai-green); |
||||
font-weight: bold; |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 4px 4px 0 0; |
||||
} |
||||
|
||||
.result-content { |
||||
background: var(--monokai-bg); |
||||
border: 1px solid var(--monokai-border); |
||||
border-radius: 0 0 4px 4px; |
||||
padding: 15px; |
||||
max-height: 400px; |
||||
overflow-y: auto; |
||||
} |
||||
|
||||
.result-content pre { |
||||
white-space: pre-wrap; |
||||
word-wrap: break-word; |
||||
font-family: 'Consolas', 'Monaco', 'Courier New', monospace; |
||||
font-size: 12px; |
||||
color: var(--monokai-fg); |
||||
line-height: 1.5; |
||||
} |
||||
|
||||
.btn-copy { |
||||
padding: 6px 14px; |
||||
background: var(--monokai-cyan); |
||||
color: var(--monokai-bg); |
||||
border: none; |
||||
border-radius: 4px; |
||||
font-size: 12px; |
||||
font-weight: bold; |
||||
cursor: pointer; |
||||
transition: all 0.3s ease; |
||||
} |
||||
|
||||
.btn-copy:hover { |
||||
background: #87e8f5; |
||||
box-shadow: 0 2px 8px rgba(102, 217, 239, 0.3); |
||||
} |
||||
|
||||
/* Scrollbar styling */ |
||||
::-webkit-scrollbar { |
||||
width: 10px; |
||||
height: 10px; |
||||
} |
||||
|
||||
::-webkit-scrollbar-track { |
||||
background: var(--monokai-dark); |
||||
} |
||||
|
||||
::-webkit-scrollbar-thumb { |
||||
background: var(--monokai-comment); |
||||
border-radius: 5px; |
||||
} |
||||
|
||||
::-webkit-scrollbar-thumb:hover { |
||||
background: var(--monokai-purple); |
||||
} |
||||
|
||||
/* Selection styling */ |
||||
::selection { |
||||
background: var(--monokai-purple); |
||||
color: var(--monokai-bg); |
||||
} |
||||
Loading…
Reference in new issue