alpha_tools/decode_template/decode_template.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
LLM 结果处理脚本 - 合并版本
从 LLM 生成的 Markdown 结果中提取 templates,生成并验证 Alpha 表达式
使用方法:
将 llm_result.md 和 dataset.csv 放在当前目录,然后运行:
python decode_template.py
输出:
在当前目录创建 output/ 文件夹,包含生成的表达式
"""
import json
import itertools
import re
import sys
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
# ==================== Expression processing utilities ====================
def detect_dataset_code(dataset_ids: List[str]) -> Optional[str]:
"""从字段ID中检测数据集代码前缀"""
if not dataset_ids:
return None
counts: Dict[str, int] = {}
for fid in dataset_ids:
tok = (str(fid).split("_", 1)[0] or "").strip()
if tok:
counts[tok] = counts.get(tok, 0) + 1
if not counts:
return None
return max(counts.items(), key=lambda kv: kv[1])[0]
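# Quick illustration (hypothetical field IDs): the most frequent token before the
# first underscore is taken as the dataset code, e.g.
# detect_dataset_code(["fnd6_total_assets", "fnd6_net_income", "mdl2_score"]) -> "fnd6"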
def build_allowed_metric_suffixes(field_ids: List[str], max_suffixes: int = 300) -> List[str]:
"""从数据集字段ID中提取实用的占位符候选列表"""
if not field_ids:
return []
# Filter out falsy IDs and coerce the rest to str
field_ids = [str(fid) for fid in field_ids if fid]
dataset_code = detect_dataset_code(field_ids)
counts: Dict[str, int] = {}
for raw in field_ids:
parts = [p for p in str(raw).split("_") if p]
if len(parts) < 2:
continue
for n in range(1, min(6, len(parts))):
suffix = "_".join(parts[-n:])
if suffix.replace("_", "").isdigit():
continue
if dataset_code and suffix.lower().startswith(dataset_code.lower() + "_"):
continue
if n == 1 and len(suffix) < 8:
continue
if len(suffix) < 6:
continue
counts[suffix] = counts.get(suffix, 0) + 1
ranked = sorted(
counts.items(),
key=lambda kv: (kv[1], kv[0].count("_"), len(kv[0])),
reverse=True,
)
suffixes: List[str] = []
for suffix, _ in ranked:
if suffix not in suffixes:
suffixes.append(suffix)
if len(suffixes) >= max_suffixes:
break
return suffixes
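# Sketch of the suffix extraction (hypothetical field IDs): purely numeric suffixes,
# suffixes shorter than 6 characters, and single-token suffixes under 8 characters
# are all dropped, so
# build_allowed_metric_suffixes(["fnd6_total_assets", "fnd6_net_income"])
# -> ["total_assets", "net_income"]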
def compress_to_known_suffix(var: str, allowed_suffixes: List[str]) -> Optional[str]:
"""将变量名压缩为已知的后缀形式"""
if not allowed_suffixes:
return None
var_lower = var.lower()
for suffix in allowed_suffixes:
if var_lower.endswith(suffix.lower()):
return suffix
if suffix.lower().endswith(var_lower):
return suffix
return None
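# Example (hypothetical values): a verbose placeholder collapses to a known suffix:
# compress_to_known_suffix("quarterly_total_assets", ["total_assets"]) -> "total_assets"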
def placeholder_is_reasonably_matchable(placeholder: str, dataset_ids: List[str]) -> bool:
"""检查占位符是否可以与数据集字段匹配"""
if not dataset_ids:
return True
v = placeholder.strip()
if not v:
return False
if len(v) <= 3:
pat = re.compile(rf"(^|_){re.escape(v)}(_|$)", flags=re.IGNORECASE)
return any(pat.search(str(fid)) for fid in dataset_ids)
return any(v in str(fid) for fid in dataset_ids)
def normalize_template_placeholders(
template: str,
dataset_ids: List[str],
allowed_suffixes: List[str],
dataset_code: Optional[str],
) -> Tuple[str, bool]:
"""规范化模板中的占位符为后缀形式"""
vars_in_template = re.findall(r"\{([A-Za-z0-9_]+)\}", template)
if not vars_in_template:
return template, False
mapping: Dict[str, str] = {}
for var in set(vars_in_template):
new_var = var
if dataset_code and new_var.lower().startswith(dataset_code.lower() + "_"):
new_var = new_var[len(dataset_code) + 1:]
compressed = compress_to_known_suffix(new_var, allowed_suffixes)
if compressed:
new_var = compressed
mapping[var] = new_var
normalized = template
for src, dst in mapping.items():
normalized = normalized.replace("{" + src + "}", "{" + dst + "}")
vars_after = re.findall(r"\{([A-Za-z0-9_]+)\}", normalized)
ok = all(placeholder_is_reasonably_matchable(v, dataset_ids) for v in vars_after)
return normalized, ok
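# End-to-end normalization example (hypothetical dataset): the dataset code prefix
# is stripped and the remainder is collapsed to a known suffix, so
# normalize_template_placeholders("rank({fnd6_total_assets})",
#     ["fnd6_total_assets"], ["total_assets"], "fnd6")
# returns ("rank({total_assets})", True).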
def extract_template_blocks(markdown_text: str) -> List[Dict[str, str]]:
"""
从 Markdown 文本中解析 **Concept** 块并提取 {template, idea}
返回列表,每个元素包含:
- template: 实现示例模板(包含 {variable} 占位符)
- idea: Concept 的完整描述
"""
concept_re = re.compile(r"^\*\*Concept\*\*\s*:\s*(.*)\s*$")
impl_re = re.compile(r"\*\*Implementation Example\*\*\s*:\s*(.*)$", flags=re.IGNORECASE)
backtick_re = re.compile(r"`([^`]*)`")
boundary_re = re.compile(r"^(?:-{3,}|#{1,6}\s+.*)\s*$")
lines = markdown_text.splitlines()
blocks: List[List[str]] = []
current: List[str] = []
def _flush():
nonlocal current
if current:
while current and not current[0].strip():
current.pop(0)
while current and not current[-1].strip():
current.pop()
if current:
blocks.append(current)
current = []
for line in lines:
if concept_re.match(line.strip()):
_flush()
current = [line]
continue
if current and boundary_re.match(line.strip()):
_flush()
continue
if current:
current.append(line)
_flush()
out: List[Dict[str, str]] = []
for block_lines in blocks:
template: Optional[str] = None
impl_line_idx: Optional[int] = None
for i, raw in enumerate(block_lines):
m = impl_re.search(raw)
if not m:
continue
impl_line_idx = i
tail = (m.group(1) or "").strip()
bt = backtick_re.search(tail)
if bt:
template = bt.group(1).strip()
break
if tail and ("{" in tail and "}" in tail):
template = tail.strip().strip("`")
break
for j in range(i + 1, min(i + 4, len(block_lines))):
nxt = block_lines[j].strip()
if not nxt:
continue
bt2 = backtick_re.search(nxt)
if bt2:
template = bt2.group(1).strip()
break
if "{" in nxt and "}" in nxt:
template = nxt.strip().strip("`")
break
break
# Skip logic commented out so templates without a {placeholder} are allowed:
# if not template or "{" not in template or "}" not in template:
# continue
# Skip when template is None
if not template:
continue
template = template.strip()
if template.startswith('{') and template.endswith('}'):
template = template[1:-1].strip()
idea_lines: List[str] = []
for i, raw in enumerate(block_lines):
if impl_line_idx is not None and i == impl_line_idx:
continue
idea_lines.append(raw)
idea = "\n".join(idea_lines).strip()
out.append({"template": template.strip(), "idea": idea})
return out
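# Minimal parse example: given Markdown such as
# **Concept**: Earnings momentum
# **Implementation Example**: `ts_delta({eps_estimate}, 20)`
# extract_template_blocks(...) yields
# [{"template": "ts_delta({eps_estimate}, 20)", "idea": "**Concept**: Earnings momentum"}]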
def match_single_horizon_auto(field_ids: List[str], template: str) -> List[str]:
"""
从模板生成表达式,将每个 {variable} 匹配到数据集字段ID
Args:
field_ids: 数据集字段ID列表
template: 包含 {variable} 占位符的模板字符串
Returns:
生成的表达式列表
"""
metrics = re.findall(r'\{([A-Za-z0-9_]+)\}', template)
if not metrics:
return []
metrics = sorted(metrics, key=len, reverse=True)
primary = metrics[0]
if not field_ids:
return []
ids = [str(fid) for fid in field_ids if fid]
def _matches_metric(field_id: str, metric: str) -> bool:
fid = str(field_id)
m = str(metric)
if len(m) <= 3:
return re.search(rf"(^|_){re.escape(m)}(_|$)", fid, flags=re.IGNORECASE) is not None
return m in fid
def _common_prefix_len(a: str, b: str) -> int:
n = min(len(a), len(b))
i = 0
while i < n and a[i] == b[i]:
i += 1
return i
candidates_by_metric = {}
for m in metrics:
cands = [fid for fid in ids if _matches_metric(fid, m)]
seen = set()
uniq = []
for x in cands:
if x not in seen:
seen.add(x)
uniq.append(x)
candidates_by_metric[m] = uniq
if not candidates_by_metric.get(primary):
return []
for m in metrics[1:]:
if not candidates_by_metric.get(m):
return []
MAX_PRIMARY_CANDIDATES = 30
MAX_SECONDARY_CHOICES = 8
MAX_EXPRESSIONS = 5000
results = []
seen_expr = set()
primary_candidates = candidates_by_metric[primary][:MAX_PRIMARY_CANDIDATES]
for primary_id in primary_candidates:
chosen_by_metric = {primary: [primary_id]}
for m in metrics[1:]:
cands = candidates_by_metric[m]
ranked = sorted(cands, key=lambda fid: _common_prefix_len(primary_id, fid), reverse=True)
chosen_by_metric[m] = ranked[:MAX_SECONDARY_CHOICES]
metric_order = metrics
pools = [chosen_by_metric[m] for m in metric_order]
for combo in itertools.product(*pools):
field_map = dict(zip(metric_order, combo))
try:
expr = template.format(**field_map)
except Exception:
continue
if expr in seen_expr:
continue
seen_expr.add(expr)
results.append(expr)
if len(results) >= MAX_EXPRESSIONS:
return results
return results
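# Matching example (hypothetical field IDs): each {placeholder} is matched to the
# field IDs containing it; secondary placeholders are ranked by shared prefix with
# the primary match.
# match_single_horizon_auto(["fnd6_eps_growth", "fnd6_rev_growth"], "rank({eps_growth})")
# -> ["rank(fnd6_eps_growth)"]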
# ==================== Expression validator ====================
# Try to import the PLY library; print an install hint if it is missing
try:
import ply.lex as lex
import ply.yacc as yacc
except ImportError:
print("错误: 需要安装PLY库。请运行 'pip install ply' 来安装。")
sys.exit(1)
# 1. Supported operators and functions
supported_functions = {
# Group functions
'group_min': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_mean': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'category']},
'group_median': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_max': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_rank': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_vector_proj': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'category']},
'group_normalize': {'min_args': 2, 'max_args': 5, 'arg_types': ['expression', 'category', 'expression', 'expression', 'expression']},
'group_extra': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'category']},
'group_backfill': {'min_args': 3, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'expression'], 'param_names': ['x', 'cat', 'days', 'std']},
'group_scale': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_count': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_zscore': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_std_dev': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_sum': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_neutralize': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'category']},
'group_multi_regression': {'min_args': 4, 'max_args': 9, 'arg_types': ['expression'] * 9},
'group_cartesian_product': {'min_args': 2, 'max_args': 2, 'arg_types': ['category', 'category']},
'combo_a': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression']},
# Transformational functions
'right_tail': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'bucket': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'tail': {'min_args': 1, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'expression']},
'left_tail': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'trade_when': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression']},
'generate_stats': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
# Cross-sectional functions
'winsorize': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['x', 'std']},
'rank': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'regression_proj': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'vector_neut': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'regression_neut': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'multi_regression': {'min_args': 2, 'max_args': 100, 'arg_types': ['expression'] * 100},
# Time-series functions
'ts_std_dev': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_mean': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_delay': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_corr': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number']},
'ts_zscore': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_returns': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'mode'], 'keyword_only': True},
'ts_product': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_backfill': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'd']},
'days_from_last_change': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'last_diff_value': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_scale': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'constant'], 'keyword_only': True},
'ts_entropy': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'd']},
'ts_step': {'min_args': 1, 'max_args': 1, 'arg_types': ['number']},
'ts_sum': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_co_kurtosis': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number']},
'inst_tvr': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_decay_exp_window': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'factor'], 'keyword_only': True},
'ts_av_diff': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_kurtosis': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_min_max_diff': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'f'], 'keyword_only': True},
'ts_arg_max': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_max': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_min_max_cps': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'f'], 'keyword_only': True},
'ts_rank': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'constant'], 'keyword_only': True},
'ts_ir': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_theilsen': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number']},
'hump_decay': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'p'], 'keyword_only': True},
'ts_weighted_decay': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'k'], 'keyword_only': True},
'ts_quantile': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'string'], 'param_names': ['x', 'd', 'driver'], 'keyword_only': True},
'ts_min': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_count_nans': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_covariance': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number']},
'ts_co_skewness': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number']},
'ts_min_diff': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_decay_linear': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'boolean'], 'param_names': ['x', 'd', 'dense'], 'keyword_only': True},
'jump_decay': {'min_args': 2, 'max_args': 4, 'arg_types': ['expression', 'number', 'number', 'number'], 'param_names': ['x', 'd', 'sensitivity', 'force'],
'keyword_only': True},
'ts_moment': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'k'], 'keyword_only': True},
'ts_arg_min': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_regression': {'min_args': 3, 'max_args': 5, 'arg_types': ['expression', 'expression', 'number', 'number', 'number'], 'param_names': ['y', 'x', 'd', 'lag', 'rettype'],
'keyword_only': True},
'ts_skewness': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_max_diff': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'kth_element': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'number', 'number']},
'hump': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'hump']},
'ts_median': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_delta': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
'ts_poly_regression': {'min_args': 3, 'max_args': 4, 'arg_types': ['expression', 'expression', 'number', 'number'], 'param_names': ['y', 'x', 'd', 'k'], 'keyword_only': True,
'keyword_only_from': 3},
'ts_target_tvr_decay': {'min_args': 1, 'max_args': 4, 'arg_types': ['expression', 'number', 'number', 'number'], 'param_names': ['x', 'lambda_min', 'lambda_max', 'target_tvr'],
'keyword_only': True},
'ts_target_tvr_delta_limit': {'min_args': 2, 'max_args': 5, 'arg_types': ['expression', 'expression', 'number', 'number', 'number'],
'param_names': ['x', 'y', 'lambda_min', 'lambda_max', 'target_tvr'], 'keyword_only': True},
'ts_target_tvr_hump': {'min_args': 1, 'max_args': 4, 'arg_types': ['expression', 'number', 'number', 'number'], 'param_names': ['x', 'lambda_min', 'lambda_max', 'target_tvr'],
'keyword_only': True},
'ts_delta_limit': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number'], 'param_names': ['x', 'y', 'limit_volume'], 'keyword_only': True},
# Special functions
'inst_pnl': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'self_corr': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'in': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'universe_size': {'min_args': 0, 'max_args': 0, 'arg_types': []},
# Missing functions from operators.py
'quantile': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['x', 'driver', 'sigma']},
'normalize': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'boolean', 'number']},
'zscore': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
# Logical functions
'or': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'and': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'not': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'is_nan': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'is_not_nan': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'less': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'equal': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'greater': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'is_finite': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'if_else': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression']},
'not_equal': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'less_equal': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'greater_equal': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
# Vector functions
'vec_kurtosis': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_min': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_count': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_sum': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_skewness': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_max': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_avg': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_range': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_choose': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'nth']},
'vec_powersum': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'constant']},
'vec_stddev': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_percentage': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'percentage']},
'vec_ir': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'vec_norm': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'ts_percentage': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'd', 'percentage']},
'signed_power': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number']},
# Additional functions from test cases
'rank_by_side': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'number', 'number'], 'param_names': ['x', 'rate', 'scale']},
'log_diff': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'nan_mask': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'ts_partial_corr': {'min_args': 4, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'number']},
'ts_triple_corr': {'min_args': 4, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'number']},
'clamp': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['x', 'lower', 'upper']},
'keep': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number'], 'param_names': ['x', 'condition', 'period']},
'replace': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['x', 'target', 'dest']},
'filter': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['x', 'h', 't']},
'one_side': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'string'], 'param_names': ['x', 'side']},
'scale_down': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'number'], 'param_names': ['x', 'constant']},
# Arithmetic functions
'add': {'min_args': 2, 'max_args': 101, 'arg_types': ['expression'] * 101},
'multiply': {'min_args': 2, 'max_args': 100, 'arg_types': ['expression'] * 99 + ['boolean'], 'param_names': ['x', 'y', 'filter']},
'sign': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'subtract': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'expression', 'boolean']},
'pasteurize': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'log': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'purify': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'arc_tan': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'max': {'min_args': 2, 'max_args': 100, 'arg_types': ['expression'] * 100},
'to_nan': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'expression', 'boolean']},
'abs': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'sigmoid': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'divide': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'min': {'min_args': 2, 'max_args': 100, 'arg_types': ['expression'] * 100},
'tanh': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'nan_out': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['x', 'lower', 'upper']},
'signed_power': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
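# NOTE: 'signed_power' is defined twice in this dict literal; this later entry
# (expression, expression) silently overrides the earlier (expression, number) one.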
'inverse': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'round': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'sqrt': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
's_log_1p': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'reverse': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'power': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression']},
'densify': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'floor': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression']},
'arc_cos': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['x']},
'arc_sin': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['x']},
'ceiling': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['x']},
'exp': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['x']},
'fraction': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['x']},
'round_down': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['x', 'f']},
'is_not_finite': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'negate': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'ts_rank_gmean_amean_diff': {'min_args': 5, 'max_args': 5, 'arg_types': ['expression', 'expression', 'expression', 'expression', 'number'],
'param_names': ['input1', 'input2', 'input3', '...', 'd']},
'ts_vector_neut': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number'], 'param_names': ['x', 'y', 'd']},
'ts_vector_proj': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'number'], 'param_names': ['x', 'y', 'd']},
'scale': {'min_args': 1, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'expression'], 'param_names': ['x', 'scale', 'longscale', 'shortscale']},
'generalized_rank': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['open', 'm']},
'rank_gmean_amean_diff': {'min_args': 4, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'expression'],
'param_names': ['input1', 'input2', 'input3', '...']},
'truncate': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['x', 'maxPercent']},
'vector_proj': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['x', 'y']},
'vec_filter': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['vec', 'value']},
'group_coalesce': {'min_args': 4, 'max_args': 4, 'arg_types': ['expression', 'expression', 'expression', 'expression'],
'param_names': ['original_group', 'group2', 'group3', '']},
'group_percentage': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'category', 'expression'], 'param_names': ['x', 'group', 'percentage']},
'group_vector_neut': {'min_args': 3, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['x', 'y', 'g']},
'convert': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['x', 'mode']},
'reduce_avg': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['input', 'threshold']},
'reduce_choose': {'min_args': 2, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['input', 'nth', 'ignoreNan']},
'reduce_count': {'min_args': 2, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['input', 'threshold']},
'reduce_ir': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_kurtosis': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_max': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_min': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_norm': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_percentage': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['input', 'percentage']},
'reduce_powersum': {'min_args': 1, 'max_args': 3, 'arg_types': ['expression', 'expression', 'expression'], 'param_names': ['input', 'constant', 'precise']},
'reduce_range': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_skewness': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
'reduce_stddev': {'min_args': 1, 'max_args': 2, 'arg_types': ['expression', 'expression'], 'param_names': ['input', 'threshold']},
'reduce_sum': {'min_args': 1, 'max_args': 1, 'arg_types': ['expression'], 'param_names': ['input']},
}
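# Each entry above is read by validate_function(): 'min_args'/'max_args' bound the
# argument count, 'arg_types' gives the expected type per position, optional
# 'param_names' enables named arguments, and 'keyword_only' / 'keyword_only_from'
# force trailing arguments to be passed by name. For example, 'ts_mean' accepts
# exactly two arguments, an expression and a number, as in ts_mean(close, 20).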
# 2. Group-type fields
group_fields = {
'sector', 'subindustry', 'industry', 'exchange', 'country', 'market'
}
# 3. Valid category set
valid_categories = group_fields
# 4. Field naming patterns - only checks that a field consists of letters, digits, and underscores
field_patterns = [
re.compile(r'^[a-zA-Z0-9_]+$'),
]
# 5. Abstract syntax tree node types
class ASTNode:
"""抽象语法树节点基类"""
def __init__(self, node_type: str, children: Optional[List['ASTNode']] = None,
value: Optional[Any] = None, line: Optional[int] = None):
self.node_type = node_type
self.children = children or []
self.value = value
self.line = line
def __str__(self) -> str:
return f"ASTNode({self.node_type}, {self.value}, line={self.line})"
def __repr__(self) -> str:
return self.__str__()
class ExpressionValidator:
"""表达式验证器类"""
def __init__(self):
"""初始化词法分析器和语法分析器"""
self.lexer = lex.lex(module=self, debug=False)
self.parser = yacc.yacc(module=self, debug=False)
self.errors = []
self._unit_cache: Dict[int, str] = {}
self._derived_category_cache: Dict[int, bool] = {}
# Lexer rules
tokens = ('FUNCTION', 'FIELD', 'NUMBER', 'LPAREN', 'RPAREN',
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'COMMA', 'CATEGORY',
'EQUAL', 'ASSIGN', 'IDENTIFIER', 'STRING', 'GREATER', 'LESS', 'GREATEREQUAL', 'LESSEQUAL', 'NOTEQUAL', 'BOOLEAN')
t_ignore = ' \t\n'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = r','
t_EQUAL = r'=='
t_NOTEQUAL = r'!='
t_GREATEREQUAL = r'>='
t_LESSEQUAL = r'<='
t_GREATER = r'>'
t_LESS = r'<'
t_ASSIGN = r'='
def t_NUMBER(self, t):
r'\d+\.?\d*'
if '.' in t.value:
t.value = float(t.value)
else:
t.value = int(t.value)
return t
def t_STRING(self, t):
r"'[^']*'|\"[^\"]*\""
t.value = t.value[1:-1]
return t
def t_IDENTIFIER(self, t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
if t.value.lower() in {'true', 'false'}:
t.type = 'BOOLEAN'
t.value = t.value.lower()
else:
lexpos = t.lexpos
next_chars = ''
if lexpos + len(t.value) < len(t.lexer.lexdata):
next_pos = lexpos + len(t.value)
while next_pos < len(t.lexer.lexdata) and t.lexer.lexdata[next_pos].isspace():
next_pos += 1
if next_pos < len(t.lexer.lexdata):
next_chars = t.lexer.lexdata[next_pos:next_pos + 1]
if next_chars == '=':
t.type = 'IDENTIFIER'
elif next_chars == '(':
t.type = 'FUNCTION'
t.value = t.value.lower()
elif t.value in {'std', 'k', 'lambda_min', 'lambda_max', 'target_tvr', 'range', 'buckets', 'lag', 'rettype', 'mode', 'nth', 'constant', 'percentage', 'driver', 'sigma',
'rate', 'scale', 'filter', 'lower', 'upper', 'target', 'dest', 'event', 'sensitivity', 'force', 'h', 't', 'period', 'stddev', 'factor', 'useStd',
'limit', 'gaussian', 'uniform', 'cauchy'}:
t.type = 'IDENTIFIER'
elif t.value.lower() in supported_functions:
t.type = 'FUNCTION'
t.value = t.value.lower()
elif t.value in valid_categories:
t.type = 'CATEGORY'
elif self._is_valid_field(t.value):
t.type = 'FIELD'
else:
t.type = 'IDENTIFIER'
return t
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(self, t):
if t:
if not re.match(r'[a-zA-Z0-9_\+\-\*/\(\)\,\s=<>!]', t.value[0]):
self.errors.append(f"Illegal character '{t.value[0]}' (line {t.lexer.lineno})")
else:
self.errors.append(f"Illegal token '{t.value}' (line {t.lexer.lineno})")
t.lexer.skip(1)
else:
self.errors.append("Lexer reached end of input")
# Parser rules
def p_expression(self, p):
"""expression : comparison
| expression EQUAL comparison
| expression NOTEQUAL comparison
| expression GREATER comparison
| expression LESS comparison
| expression GREATEREQUAL comparison
| expression LESSEQUAL comparison"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ASTNode('binop', [p[1], p[3]], {'op': p[2]})
def p_comparison(self, p):
"""comparison : term
| comparison PLUS term
| comparison MINUS term"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ASTNode('binop', [p[1], p[3]], {'op': p[2]})
def p_term(self, p):
"""term : factor
| term TIMES factor
| term DIVIDE factor"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ASTNode('binop', [p[1], p[3]], {'op': p[2]})
def p_factor(self, p):
"""factor : NUMBER
| STRING
| FIELD
| CATEGORY
| IDENTIFIER
| BOOLEAN
| MINUS factor
| LPAREN expression RPAREN
| function_call"""
if len(p) == 2:
if p.slice[1].type == 'NUMBER':
p[0] = ASTNode('number', value=p[1])
elif p.slice[1].type == 'STRING':
p[0] = ASTNode('string', value=p[1])
elif p.slice[1].type == 'FIELD':
p[0] = ASTNode('field', value=p[1])
elif p.slice[1].type == 'CATEGORY':
p[0] = ASTNode('category', value=p[1])
elif p.slice[1].type == 'BOOLEAN':
p[0] = ASTNode('boolean', value=p[1])
elif p.slice[1].type == 'IDENTIFIER':
p[0] = ASTNode('identifier', value=p[1])
else:
p[0] = p[1]
elif len(p) == 3:
p[0] = ASTNode('unop', [p[2]], {'op': p[1]})
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_function_call(self, p):
'''function_call : FUNCTION LPAREN args RPAREN'''
p[0] = ASTNode('function', p[3], p[1])
def p_args(self, p):
'''args : arg_list
| empty'''
if len(p) == 2 and p[1] is not None:
p[0] = p[1]
else:
p[0] = []
def p_arg_list(self, p):
'''arg_list : arg
| arg_list COMMA arg'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_arg(self, p):
'''arg : expression
| IDENTIFIER ASSIGN expression'''
if len(p) == 2:
p[0] = {'type': 'positional', 'value': p[1]}
else:
p[0] = {'type': 'named', 'name': p[1], 'value': p[3]}
def p_empty(self, p):
'''empty :'''
p[0] = None
def p_error(self, p):
if p:
self.errors.append(f"Syntax error at position {p.lexpos}: illegal token '{p.value}'")
else:
self.errors.append("Syntax error: incomplete expression")
def _is_valid_field(self, field_name: str) -> bool:
"""检查字段名是否符合模式"""
for pattern in field_patterns:
if pattern.match(field_name):
return True
return False
def validate_function(self, node: ASTNode, is_in_group_arg: bool = False) -> List[str]:
"""Validate the argument count and types of a function call."""
function_name = node.value
args = node.children
function_info = supported_functions.get(function_name)
if not function_info:
return [f"Unknown function: {function_name}"]
if function_name == 'add':
return self._validate_add(args, is_in_group_arg)
errors = []
keyword_only_from = function_info.get('keyword_only_from')
if keyword_only_from is None and function_info.get('keyword_only'):
keyword_only_from = function_info.get('min_args', 0)
if len(args) < function_info['min_args']:
errors.append(f"Function {function_name} requires at least {function_info['min_args']} arguments, got {len(args)}")
elif len(args) > function_info['max_args']:
errors.append(f"Function {function_name} accepts at most {function_info['max_args']} arguments, got {len(args)}")
positional_index = 0
for arg in args:
if isinstance(arg, dict):
if arg['type'] == 'named':
if 'param_names' in function_info and arg['name'] in function_info['param_names']:
param_index = function_info['param_names'].index(arg['name'])
if param_index < len(function_info['arg_types']):
expected_type = function_info['arg_types'][param_index]
arg_errors = self._validate_arg_type(arg['value'], expected_type, param_index, function_name, is_in_group_arg)
errors.extend(arg_errors)
elif function_name == 'winsorize' and arg['name'] in ['std', 'clip']:
arg_errors = self._validate_arg_type(arg['value'], 'number', 0, function_name, is_in_group_arg)
errors.extend(arg_errors)
elif function_name == 'bucket' and arg['name'] in ['range', 'buckets']:
arg_errors = self._validate_arg_type(arg['value'], 'string', 1, function_name, is_in_group_arg)
errors.extend(arg_errors)
else:
errors.append(f"函数 {function_name} 不存在参数 '{arg['name']}'")
elif arg['type'] == 'positional':
if keyword_only_from is not None and positional_index >= keyword_only_from:
param_name = None
if 'param_names' in function_info and positional_index < len(function_info['param_names']):
param_name = function_info['param_names'][positional_index]
if param_name:
errors.append(f"Argument {positional_index + 1} of function {function_name} must be passed as the named parameter '{param_name}='")
else:
errors.append(f"Argument {positional_index + 1} of function {function_name} must be passed as a named parameter")
else:
if positional_index < len(function_info['arg_types']):
expected_type = function_info['arg_types'][positional_index]
arg_errors = self._validate_arg_type(arg['value'], expected_type, positional_index, function_name, is_in_group_arg)
errors.extend(arg_errors)
positional_index += 1
else:
errors.append(f"参数 {positional_index + 1} 格式错误")
positional_index += 1
else:
if keyword_only_from is not None and positional_index >= keyword_only_from:
param_name = None
if 'param_names' in function_info and positional_index < len(function_info['param_names']):
param_name = function_info['param_names'][positional_index]
if param_name:
errors.append(f"Argument {positional_index + 1} of function {function_name} must be passed as the named parameter '{param_name}='")
else:
errors.append(f"Argument {positional_index + 1} of function {function_name} must be passed as a named parameter")
else:
if positional_index < len(function_info['arg_types']):
expected_type = function_info['arg_types'][positional_index]
arg_errors = self._validate_arg_type(arg, expected_type, positional_index, function_name, is_in_group_arg)
errors.extend(arg_errors)
positional_index += 1
return errors
def _validate_arg_type(self, arg: ASTNode, expected_type: str, arg_index: int, function_name: str, is_in_group_arg: bool = False) -> List[str]:
"""验证参数类型是否符合预期"""
errors = []
def _is_number_like(node: ASTNode) -> bool:
if node is None:
return False
if node.node_type == 'number':
return True
if node.node_type == 'unop' and isinstance(node.value, dict) and node.value.get('op') in {'-', '+'}:
if node.children and hasattr(node.children[0], 'node_type'):
return _is_number_like(node.children[0])
return False
if self._is_derived_category(arg) and expected_type != 'category':
errors.append(
f"Incompatible unit for input of \"{function_name}\" at index {arg_index}, expected \"Unit[]\", found \"Unit[Group:1]\""
)
return errors
if arg.node_type == 'category' and arg.value in group_fields:
if not (function_name.startswith('group_') or is_in_group_arg):
errors.append(f"Group类型字段 '{arg.value}' 只能用于Group类型函数的参数中")
if expected_type == 'expression':
pass
elif expected_type == 'number':
if not _is_number_like(arg):
errors.append(f"Argument {arg_index + 1} should be a number, but got {arg.node_type}")
elif expected_type == 'boolean':
if arg.node_type not in {'boolean', 'number'}:
errors.append(f"Argument {arg_index + 1} should be a boolean (true/false or 0/1), but got {arg.node_type}")
elif expected_type == 'field':
if arg.node_type != 'field' and arg.node_type != 'category':
errors.append(f"Argument {arg_index + 1} should be a field, but got {arg.node_type}")
elif arg.node_type == 'field' and not self._is_valid_field(arg.value):
errors.append(f"Invalid field name: {arg.value}")
elif expected_type == 'category':
if not function_name.startswith('group_'):
if arg.node_type != 'category':
errors.append(f"Argument {arg_index + 1} should be a category, but got {arg.node_type}")
elif arg.value not in valid_categories:
errors.append(f"Invalid category: {arg.value}")
return errors
def _infer_unit(self, node: ASTNode) -> str:
"""Infer the Unit kind of an AST node."""
if node is None:
return 'unit'
cache_key = id(node)
cached = self._unit_cache.get(cache_key)
if cached is not None:
return cached
unit = 'unit'
if node.node_type in {'number', 'boolean', 'string'}:
unit = 'scalar'
elif node.node_type in {'field', 'identifier'}:
unit = 'unit'
elif node.node_type == 'category':
unit = 'category'
elif node.node_type in {'unop', 'binop'}:
child_units = [self._infer_unit(child) for child in node.children if hasattr(child, 'node_type')]
unit = 'category' if 'category' in child_units else 'unit'
elif node.node_type == 'function':
fname = node.value
if fname in {'bucket', 'group_cartesian_product'}:
unit = 'category'
else:
first_arg = None
for child in node.children:
if isinstance(child, dict):
if child.get('type') == 'positional':
first_arg = child.get('value')
break
else:
first_arg = child
break
if hasattr(first_arg, 'node_type'):
unit = self._infer_unit(first_arg)
else:
unit = 'unit'
self._unit_cache[cache_key] = unit
return unit
def _is_derived_category(self, node: ASTNode) -> bool:
"""Return True if node is a derived category/grouping key."""
if node is None:
return False
cache_key = id(node)
cached = self._derived_category_cache.get(cache_key)
if cached is not None:
return cached
derived = False
if node.node_type == 'function':
if node.value in {'bucket', 'group_cartesian_product'}:
derived = True
else:
function_info = supported_functions.get(node.value, {})
arg_types = function_info.get('arg_types', [])
param_names = function_info.get('param_names', [])
positional_index = 0
for child in node.children:
if isinstance(child, dict):
if child.get('type') == 'named':
name = child.get('name')
value = child.get('value')
expected_type = None
if name in param_names:
param_index = param_names.index(name)
if param_index < len(arg_types):
expected_type = arg_types[param_index]
if expected_type == 'category':
continue
if self._is_derived_category(value):
derived = True
break
elif child.get('type') == 'positional':
value = child.get('value')
expected_type = arg_types[positional_index] if positional_index < len(arg_types) else None
if expected_type != 'category' and self._is_derived_category(value):
derived = True
break
positional_index += 1
else:
expected_type = arg_types[positional_index] if positional_index < len(arg_types) else None
if expected_type != 'category' and self._is_derived_category(child):
derived = True
break
positional_index += 1
elif node.node_type in {'unop', 'binop'}:
derived = any(
self._is_derived_category(child)
for child in node.children
if hasattr(child, 'node_type')
)
self._derived_category_cache[cache_key] = derived
return derived
def _validate_add(self, args: List[Any], is_in_group_arg: bool = False) -> List[str]:
"""Validate add(x, y, ..., filter=false)."""
errors: List[str] = []
if len(args) < 2:
return [f"函数 add 需要至少 2 个参数,但只提供了 {len(args)}"]
named_filter_nodes: List[ASTNode] = []
positional_nodes: List[ASTNode] = []
for arg in args:
if isinstance(arg, dict) and arg.get('type') == 'named':
name = arg.get('name')
value = arg.get('value')
if name != 'filter':
errors.append(f"Function add has no parameter '{name}'")
continue
if not hasattr(value, 'node_type'):
errors.append("Malformed 'filter' argument to function add")
continue
named_filter_nodes.append(value)
elif isinstance(arg, dict) and arg.get('type') == 'positional':
value = arg.get('value')
if hasattr(value, 'node_type'):
positional_nodes.append(value)
else:
errors.append("函数 add 的位置参数格式错误")
elif hasattr(arg, 'node_type'):
positional_nodes.append(arg)
else:
errors.append("函数 add 的参数格式错误")
if len(named_filter_nodes) > 1:
errors.append("函数 add 的参数 'filter' 只能出现一次")
positional_filter_node: Optional[ASTNode] = None
if not named_filter_nodes and len(positional_nodes) >= 3:
last = positional_nodes[-1]
if last.node_type == 'boolean' or (last.node_type == 'number' and last.value in {0, 1}):
positional_filter_node = positional_nodes.pop()
if len(positional_nodes) < 2:
errors.append(f"函数 add 需要至少 2 个输入项(不含filter),但只提供了 {len(positional_nodes)}")
for idx, node in enumerate(positional_nodes):
errors.extend(self._validate_arg_type(node, 'expression', idx, 'add', is_in_group_arg))
if positional_filter_node is not None and named_filter_nodes:
errors.append("函数 add 的 filter 不能同时用位置参数和命名参数传递")
if positional_filter_node is not None:
errors.extend(self._validate_arg_type(positional_filter_node, 'boolean', len(positional_nodes), 'add', is_in_group_arg))
if named_filter_nodes:
errors.extend(self._validate_arg_type(named_filter_nodes[0], 'boolean', len(positional_nodes), 'add', is_in_group_arg))
return errors
def validate_ast(self, ast: Optional[ASTNode], is_in_group_arg: bool = False) -> List[str]:
"""递归验证抽象语法树"""
if not ast:
return ["无法解析表达式"]
errors = []
if ast.node_type == 'function':
is_group_function = ast.value.startswith('group_')
current_in_group_arg = is_in_group_arg or is_group_function
function_errors = self.validate_function(ast, current_in_group_arg)
errors.extend(function_errors)
for child in ast.children:
if isinstance(child, dict):
if 'value' in child and hasattr(child['value'], 'node_type'):
child_errors = self.validate_ast(child['value'], current_in_group_arg)
errors.extend(child_errors)
elif hasattr(child, 'node_type'):
child_errors = self.validate_ast(child, current_in_group_arg)
errors.extend(child_errors)
elif ast.node_type in ['unop', 'binop']:
for child in ast.children:
if hasattr(child, 'node_type'):
child_errors = self.validate_ast(child, is_in_group_arg)
errors.extend(child_errors)
elif ast.node_type == 'field':
if not self._is_valid_field(ast.value):
errors.append(f"无效的字段名: {ast.value}")
else:
for child in ast.children:
if isinstance(child, dict):
if 'value' in child and hasattr(child['value'], 'node_type'):
child_errors = self.validate_ast(child['value'], is_in_group_arg)
errors.extend(child_errors)
elif hasattr(child, 'node_type'):
child_errors = self.validate_ast(child, is_in_group_arg)
errors.extend(child_errors)
return errors
def _process_semicolon_expression(self, expression: str) -> Tuple[bool, str]:
"""处理带有分号的表达式,将其转换为不带分号的简化形式"""
def _top_level_equals_positions(stmt: str) -> List[int]:
positions: List[int] = []
paren_depth = 0
bracket_depth = 0
brace_depth = 0
in_single_quote = False
in_double_quote = False
escape = False
for i, ch in enumerate(stmt):
if escape:
escape = False
continue
if ch == '\\':
escape = True
continue
if in_single_quote:
if ch == "'":
in_single_quote = False
continue
if in_double_quote:
if ch == '"':
in_double_quote = False
continue
if ch == "'":
in_single_quote = True
continue
if ch == '"':
in_double_quote = True
continue
if ch == '(':
paren_depth += 1
continue
if ch == ')':
paren_depth = max(0, paren_depth - 1)
continue
if ch == '[':
bracket_depth += 1
continue
if ch == ']':
bracket_depth = max(0, bracket_depth - 1)
continue
if ch == '{':
brace_depth += 1
continue
if ch == '}':
brace_depth = max(0, brace_depth - 1)
continue
if paren_depth or bracket_depth or brace_depth:
continue
if ch != '=':
continue
prev_ch = stmt[i - 1] if i > 0 else ''
next_ch = stmt[i + 1] if i + 1 < len(stmt) else ''
if prev_ch in ['=', '!', '<', '>'] or next_ch == '=':
continue
positions.append(i)
return positions
def _keyword_arg_names(stmt: str):
names = set()
paren_depth = 0
bracket_depth = 0
brace_depth = 0
in_single_quote = False
in_double_quote = False
escape = False
i = 0
while i < len(stmt):
ch = stmt[i]
if escape:
escape = False
i += 1
continue
if ch == '\\':
escape = True
i += 1
continue
if in_single_quote:
if ch == "'":
in_single_quote = False
i += 1
continue
if in_double_quote:
if ch == '"':
in_double_quote = False
i += 1
continue
if ch == "'":
in_single_quote = True
i += 1
continue
if ch == '"':
in_double_quote = True
i += 1
continue
if ch == '(':
paren_depth += 1
i += 1
continue
if ch == ')':
paren_depth = max(0, paren_depth - 1)
i += 1
continue
if ch == '[':
bracket_depth += 1
i += 1
continue
if ch == ']':
bracket_depth = max(0, bracket_depth - 1)
i += 1
continue
if ch == '{':
brace_depth += 1
i += 1
continue
if ch == '}':
brace_depth = max(0, brace_depth - 1)
i += 1
continue
inside_container = bool(paren_depth or bracket_depth or brace_depth)
if inside_container and (ch.isalpha() or ch == '_'):
start = i
i += 1
while i < len(stmt) and (stmt[i].isalnum() or stmt[i] == '_'):
i += 1
name = stmt[start:i]
j = i
while j < len(stmt) and stmt[j].isspace():
j += 1
if j < len(stmt) and stmt[j] == '=':
next_ch = stmt[j + 1] if j + 1 < len(stmt) else ''
if next_ch != '=':
names.add(name.lower())
continue
i += 1
return names
if expression.strip().endswith(';'):
return False, "Expression must not end with a semicolon"
statements = [stmt.strip() for stmt in expression.split(';') if stmt.strip()]
if not statements:
return False, "Expression must not be empty"
variables = {}
for i, stmt in enumerate(statements[:-1]):
eq_positions = _top_level_equals_positions(stmt)
if not eq_positions:
return False, f"Statement {i + 1} must be an assignment (using the = sign)"
if len(eq_positions) > 1:
return False, f"Statement {i + 1} may contain only one assignment operator (=)"
real_equals_pos = eq_positions[0]
var_name = stmt[:real_equals_pos].strip()
var_value = stmt[real_equals_pos + 1:].strip()
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', var_name):
return False, f"Invalid variable name '{var_name}' in statement {i + 1}: only letters, digits, and underscores are allowed, and it must not start with a digit"
var_name_lower = var_name.lower()
kw_names = _keyword_arg_names(var_value)
used_vars = re.findall(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', var_value)
for used_var in used_vars:
used_var_lower = used_var.lower()
if used_var_lower in kw_names:
continue
if used_var_lower not in variables:
if used_var not in supported_functions:
if len(used_var) <= 2:
return False, f"Variable '{used_var}' used in statement {i + 1} was not defined earlier"
elif not self._is_valid_field(used_var):
return False, f"Variable '{used_var}' used in statement {i + 1} was not defined earlier"
for existing_var, existing_val in variables.items():
var_value = re.sub(rf'\b{existing_var}\b', existing_val, var_value)
variables[var_name_lower] = var_value
final_stmt = statements[-1]
if _top_level_equals_positions(final_stmt):
return False, "最后一个语句不能是赋值语句"
kw_names = _keyword_arg_names(final_stmt)
used_vars = re.findall(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', final_stmt)
for used_var in used_vars:
used_var_lower = used_var.lower()
if used_var_lower in kw_names:
continue
if used_var_lower not in variables:
if used_var not in supported_functions:
if len(used_var) <= 2:
return False, f"Variable '{used_var}' used in the final statement was not defined earlier"
if self._is_valid_field(used_var) or used_var_lower in valid_categories or used_var_lower in group_fields:
continue
return False, f"Variable '{used_var}' used in the final statement was not defined earlier"
final_expr = final_stmt
for var_name, var_value in variables.items():
final_expr = re.sub(rf'\b{var_name}\b', var_value, final_expr)
return True, final_expr
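# Rewrite example: intermediate assignments are inlined into the final statement,
# e.g. _process_semicolon_expression("x = ts_mean(close, 20); rank(x)")
# returns (True, "rank(ts_mean(close, 20))").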
def check_expression(self, expression: str) -> Dict[str, Any]:
"""检查表达式格式是否正确"""
self.errors = []
self._unit_cache = {}
self._derived_category_cache = {}
try:
expression = expression.strip()
if not expression:
return {
'valid': False,
'errors': ['Expression must not be empty'],
'tokens': [],
'ast': None
}
if ';' in expression:
success, result = self._process_semicolon_expression(expression)
if not success:
return {
'valid': False,
'errors': [result],
'tokens': [],
'ast': None
}
expression = result
self.lexer.lineno = 1
self.lexer.input(expression)
tokens = []
for token in self.lexer:
tokens.append(token)
self.lexer.input(expression)
self.lexer.lineno = 1
ast = self.parser.parse(expression, lexer=self.lexer)
validation_errors = self.validate_ast(ast)
all_errors = self.errors + validation_errors
bracket_count = 0
for char in expression:
if char == '(':
bracket_count += 1
elif char == ')':
bracket_count -= 1
if bracket_count < 0:
all_errors.append("括号不匹配: 右括号过多")
break
if bracket_count > 0:
all_errors.append("括号不匹配: 左括号过多")
return {
'valid': len(all_errors) == 0,
'errors': all_errors,
'tokens': tokens,
'ast': ast
}
except Exception as e:
return {
'valid': False,
'errors': [f"解析错误: {str(e)}"],
'tokens': [],
'ast': None
}
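# Usage sketch (assuming PLY is installed):
# v = ExpressionValidator()
# v.check_expression("ts_mean(close, 20)")["valid"] -> True
# v.check_expression("ts_mean(close)")["errors"]
# -> ["Function ts_mean requires at least 2 arguments, got 1"]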
def validate_expressions(self, expressions: List[str]) -> Tuple[List[str], List[Dict]]:
"""批量验证表达式"""
valid_expressions = []
invalid_details = []
for expr in expressions:
if not isinstance(expr, str) or not expr.strip():
invalid_details.append({
'expression': expr,
'error': 'Empty expression'
})
continue
result = self.check_expression(expr.strip())
if result['valid']:
valid_expressions.append(expr.strip())
else:
invalid_details.append({
'expression': expr,
'errors': result['errors']
})
return valid_expressions, invalid_details
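# Batch usage sketch:
# valid, invalid = ExpressionValidator().validate_expressions(
#     ["ts_mean(close, 20)", "foo_bar(close)"])
# -> valid == ["ts_mean(close, 20)"]
# -> invalid[0]["errors"] == ["Unknown function: foo_bar"]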
# ==================== Main processing pipeline ====================
def process(data_sets_list: List[Dict], llm_template: str) -> Dict:
"""
Process LLM-generated templates into Alpha expressions.
Args:
data_sets_list: list of dataset fields, in the form [{'id': 'field_name'}, ...]
llm_template: LLM-generated Markdown text
Returns:
dict containing the generated results
"""
# Extract field_ids
field_ids = [item['id'] for item in data_sets_list]
# Derive dataset_ids and allowed_suffixes
dataset_ids = field_ids
allowed_suffixes = build_allowed_metric_suffixes(field_ids)
dataset_code = detect_dataset_code(dataset_ids)
print(f" Dataset field count: {len(dataset_ids)}")
print(f" Extracted suffix count: {len(allowed_suffixes)}")
print(f" Dataset code: {dataset_code}")
# Step 1: extract template blocks
print("\n🔍 Extracting template blocks...")
block_pairs = extract_template_blocks(llm_template)
print(f" Extracted {len(block_pairs)} Concept blocks")
if not block_pairs:
print(" No valid Concept blocks found")
return {
"success": False,
"error": "没有找到有效的 Concept blocks",
"expressions": [],
"templates": []
}
# Step 2: normalize templates
print("\n📝 Normalizing templates...")
normalized_pairs = []
for item in block_pairs:
template = str(item.get("template") or "").strip()
idea_text = str(item.get("idea") or "").strip()
if not template:
continue
normalized_t, ok = normalize_template_placeholders(
template, dataset_ids, allowed_suffixes, dataset_code
)
# Allow templates without a {placeholder} to pass through
# if ok:
normalized_pairs.append({
"template": normalized_t,
"idea": idea_text,
"original_template": template
})
print(f"{normalized_t[:60]}...")
# else:
# print(f" ✗ Skipped: {template[:60]}...")
print(f" Normalized successfully: {len(normalized_pairs)}")
# Step 3: generate expressions
print("\n🎯 Generating Alpha expressions...")
all_expressions = []
template_results = []
for idx, pair in enumerate(normalized_pairs, 1):
template = pair["template"]
expressions = match_single_horizon_auto(field_ids, template)
# If no expressions were generated (no {placeholder}), use the template itself as the expression
if not expressions and template:
expressions = [template]
if expressions:
result_item = {
"template": template,
"original_template": pair.get("original_template", template),
"idea": pair.get("idea", ""),
"expression_count": len(expressions),
"expressions": expressions
}
template_results.append(result_item)
all_expressions.extend(expressions)
print(f" Idea {idx}: {len(expressions):4d} 个表达式 - {template[:50]}...")
print(f"\n 总计生成: {len(all_expressions)} 个表达式")
# Step 4: 验证表达式
print("\n🔍 验证表达式...")
validator = ExpressionValidator()
valid_expressions, invalid_details = validator.validate_expressions(all_expressions)
invalid_count = len(invalid_details)
print(f" 有效: {len(valid_expressions)}")
print(f" 无效: {invalid_count}")
if invalid_details:
print(f" 无效表达式详情:")
for detail in invalid_details[:5]:
print(f" - {detail['expression'][:50]}...: {detail.get('errors', detail.get('error', '未知错误'))}")
if len(invalid_details) > 5:
print(f" ... 还有 {len(invalid_details) - 5} 个无效表达式")
# Step 5: 去重
print("\n🧹 去重...")
unique_expressions = []
seen = set()
for expr in valid_expressions:
if expr not in seen:
unique_expressions.append(expr)
seen.add(expr)
print(f" 去重后: {len(unique_expressions)}")
# Step 6: 过滤包含英文逗号的表达式和模板 过滤表达式:只保留包含英文逗号的
print("\n🧹 过滤包含英文逗号的表达式和模板...")
filtered_expressions = [expr for expr in unique_expressions if ',' in expr]
print(f" 过滤后表达式: {len(filtered_expressions)} 个 (原 {len(unique_expressions)} 个)")
# 过滤模板:只保留包含英文逗号的模板,且其表达式也要包含英文逗号
filtered_template_results = []
for template_item in template_results:
template = template_item.get("template", "")
# Check whether the template contains an ASCII comma
if ',' not in template:
continue
# Keep only expressions that contain an ASCII comma
filtered_exprs = [expr for expr in template_item.get("expressions", []) if ',' in expr]
if filtered_exprs:
filtered_item = template_item.copy()
filtered_item["expressions"] = filtered_exprs
filtered_item["expression_count"] = len(filtered_exprs)
filtered_template_results.append(filtered_item)
print(f" 过滤后模板: {len(filtered_template_results)} 个 (原 {len(template_results)} 个)")
print("\n✅ 处理完成!")
# 打印统计
print("\n" + "=" * 50)
print("处理统计:")
print("=" * 50)
summary = {
"total_templates": len(block_pairs),
"normalized_templates": len(normalized_pairs),
"total_expressions": len(all_expressions),
"valid_expressions": len(valid_expressions),
"unique_expressions": len(unique_expressions),
"invalid_count": invalid_count
}
for key, value in summary.items():
print(f" {key}: {value}")
# Return the result dict
return {
"success": True,
"summary": summary,
"templates": filtered_template_results,
"expressions": filtered_expressions,
"invalid_details": invalid_details
}
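# Driver sketch (hypothetical inputs; the script's own wiring reads llm_result.md
# and dataset.csv from the current directory, as the module docstring describes):
# result = process(
#     [{"id": "fnd6_total_assets"}, {"id": "fnd6_net_income"}],
#     Path("llm_result.md").read_text(encoding="utf-8"),
# )
# result["expressions"] then holds the deduplicated, validated, comma-containing expressions.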