manual-tools/clash/clash_check_now_node.py

# -*- coding: utf-8 -*-
# Check whether any of the nodes are duplicates
import asyncio
import httpx
from typing import Optional, Dict, Any, List, Tuple
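
# NOTE: the "/api" prefix used below is an assumption about this particular
# deployment (e.g. a reverse proxy in front of Clash); a stock Clash
# external controller serves /configs and /proxies without the prefix.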
async def check_now_node(url_and_port: str) -> Optional[str]:
    """Switch the controller to global mode and return the current node."""
    async with httpx.AsyncClient(timeout=10.0) as client:
        try:
            # Switch to global mode
            set_url = f"http://{url_and_port}/api/configs"
            set_data = {"mode": "Global"}
            set_response = await client.patch(set_url, json=set_data)
            set_response.raise_for_status()

            # Fetch proxy information
            get_url = f"http://{url_and_port}/api/proxies"
            get_response = await client.get(get_url)
            get_response.raise_for_status()

            json_data = get_response.json()
            proxies: Dict[str, Any] = json_data.get("proxies", {})
            proxy_global: Dict[str, Any] = proxies.get("GLOBAL", {})
            now_proxy: Optional[str] = proxy_global.get("now")
            return now_proxy
        except httpx.HTTPError as exc:
            print(f"Request failed for {url_and_port}: {exc}")
            return None

async def batch_check_nodes(ip: str, ports: List[str]) -> Dict[str, Optional[str]]:
    """Check all nodes concurrently."""
    tasks = [check_now_node(f"{ip}:{port}") for port in ports]
    results = await asyncio.gather(*tasks)
    return {
        f"{ip}:{port}": result
        for port, result in zip(ports, results)
    }

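# A minimal sketch of a bounded-concurrency variant, in case far more ports
# are probed at once than the ten used here. The function name and the
# default limit of 5 are assumptions for illustration, not part of the
# original script.
async def batch_check_nodes_limited(
    ip: str, ports: List[str], limit: int = 5
) -> Dict[str, Optional[str]]:
    """Like batch_check_nodes, but with at most `limit` requests in flight."""
    semaphore = asyncio.Semaphore(limit)

    async def guarded(port: str) -> Optional[str]:
        async with semaphore:
            return await check_now_node(f"{ip}:{port}")

    results = await asyncio.gather(*(guarded(port) for port in ports))
    return {f"{ip}:{port}": result for port, result in zip(ports, results)}
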
def find_duplicate_nodes(results: Dict[str, Optional[str]]) -> List[Tuple[str, str]]:
    """Find URLs that resolved to the same node."""
    node_to_urls: Dict[str, List[str]] = {}
    for url, node in results.items():
        if node:  # only consider successfully checked nodes
            node_to_urls.setdefault(node, []).append(url)

    # Collect every pair of URLs that share a node
    duplicates = []
    for node, urls in node_to_urls.items():
        if len(urls) > 1:
            for i in range(len(urls)):
                for j in range(i + 1, len(urls)):
                    duplicates.append((urls[i], urls[j]))
    return duplicates
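
# Note: n URLs sharing one node yield n * (n - 1) / 2 pairs here; if a
# grouped report were preferred, returning node_to_urls directly would do.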
if __name__ == "__main__":
    ip = '192.168.31.201'
    ports = [f'{58000 + i}' for i in range(1, 11)]
    results = asyncio.run(batch_check_nodes(ip, ports))

    # Print every node that was detected
    for url, node in results.items():
        print(f"{url}: {node or 'check failed'}")

    # Report duplicate nodes, if any
    duplicates = find_duplicate_nodes(results)
    if duplicates:
        print("\nDuplicate nodes found:")
        for url1, url2 in duplicates:
            print(f"{url1} and {url2} are duplicates")
    else:
        print("\nNo duplicate nodes found")