|
|
@@ -1,5 +1,6 @@
|
|
|
#!/usr/bin/env python3
|
|
|
import argparse
|
|
|
+import csv
|
|
|
import datetime as dt
|
|
|
import functools
|
|
|
import json
|
|
|
@@ -55,6 +56,24 @@ def build_url(base_url, params):
|
|
|
return urllib.parse.urlunparse(parsed._replace(query=query))
|
|
|
|
|
|
|
|
|
def resolve_path(base_dir, path_value):
    """Resolve *path_value* against *base_dir* and normalize it.

    Empty/None/whitespace-only input yields "".  Absolute paths are
    normalized as-is; relative paths are joined onto base_dir first.
    """
    text = str(path_value or "").strip()
    if text:
        candidate = text if os.path.isabs(text) else os.path.join(base_dir, text)
        return os.path.normpath(candidate)
    return ""
|
|
|
+
|
|
|
+
|
|
|
def get_source_type(cfg):
    """Return the configured source type (trimmed, lowercased), defaulting to "api".

    Falls back to "api" when the "source" section is missing, is not an
    object, or declares an empty type.
    """
    source = cfg.get("source", {})
    if not isinstance(source, dict):
        return "api"
    declared = str(source.get("type", "api")).strip().lower()
    return declared or "api"
|
|
|
+
|
|
|
+
|
|
|
def fetch_api_json(cfg):
|
|
|
api = cfg["api"]
|
|
|
url = build_url(api["url"], api.get("params", {}))
|
|
|
@@ -73,6 +92,97 @@ def fetch_api_json(cfg):
|
|
|
return json.loads(raw)
|
|
|
|
|
|
|
|
|
def load_cfst_rows(cfg, config_path_abs):
    """Run CloudflareSpeedTest (cfst) locally and parse its CSV result.

    Paths are resolved relative to the config file's directory (work_dir)
    and then relative to work_dir (binary, result_file).  Unless
    ``skip_run`` is set, the cfst binary is executed in work_dir, after
    which the result CSV is read into a list of dicts with the keys:
    domain, ip, sent, received, loss_rate, avg_latency, download_speed,
    region.

    Raises RuntimeError when the binary is missing, the run fails or
    times out, the result file is absent, or no valid rows are parsed;
    ValueError on malformed configuration.
    """
    cfst_cfg = cfg.get("cfst_local", {})
    config_dir = os.path.dirname(config_path_abs)

    work_dir = resolve_path(config_dir, cfst_cfg.get("work_dir", "./cfst"))
    binary_path = resolve_path(work_dir, cfst_cfg.get("binary", "./cfst"))
    result_file = resolve_path(work_dir, cfst_cfg.get("result_file", "result.csv"))
    encoding = str(cfst_cfg.get("encoding", "utf-8")).strip() or "utf-8"
    skip_run = bool(cfst_cfg.get("skip_run", False))
    timeout_sec = int(cfst_cfg.get("run_timeout_sec", 600))

    # NOTE(review): the default "-o <basename>" writes into work_dir (the
    # subprocess cwd); if result_file is configured to a path outside
    # work_dir, callers must pass explicit run_args so the output lands
    # where result_file points.
    run_args = cfst_cfg.get("run_args", ["-o", os.path.basename(result_file)])
    if not isinstance(run_args, list):
        raise ValueError("cfst_local.run_args must be an array")
    command = [binary_path] + [str(x) for x in run_args]

    if not skip_run:
        # Fail early with a clear message instead of letting subprocess
        # raise a bare FileNotFoundError.
        if not os.path.isfile(binary_path):
            raise RuntimeError(f"cfst binary not found: {binary_path}")
        try:
            completed = subprocess.run(
                command,
                cwd=work_dir,
                check=False,
                capture_output=True,
                text=True,
                encoding=encoding,
                errors="replace",
                timeout=timeout_sec,
            )
        except subprocess.TimeoutExpired as exc:
            # Keep the function's error contract: surface run problems
            # as RuntimeError rather than a raw TimeoutExpired.
            raise RuntimeError(f"cfst run timed out after {timeout_sec}s") from exc
        if completed.returncode != 0:
            stderr = (completed.stderr or "").strip()
            stdout = (completed.stdout or "").strip()
            details = stderr or stdout or f"exit code {completed.returncode}"
            raise RuntimeError(f"cfst run failed: {details}")

    if not os.path.exists(result_file):
        raise RuntimeError(f"cfst result file not found: {result_file}")

    # Drop rows that are entirely blank before applying header_rows.
    with open(result_file, "r", encoding=encoding, errors="replace", newline="") as f:
        reader = csv.reader(f)
        rows = [row for row in reader if any(str(col).strip() for col in row)]

    header_rows = int(cfst_cfg.get("header_rows", 1))
    if len(rows) <= header_rows:
        raise RuntimeError("cfst result has no data rows")

    columns_cfg = cfst_cfg.get("columns", {})
    if not isinstance(columns_cfg, dict):
        raise ValueError("cfst_local.columns must be an object")

    def col_index(name, default_index):
        # Column indexes are 0-based positions within each CSV row.
        raw = columns_cfg.get(name, default_index)
        try:
            idx = int(raw)
        except Exception as exc:
            raise ValueError(f"cfst_local.columns.{name} must be an integer") from exc
        if idx < 0:
            raise ValueError(f"cfst_local.columns.{name} must be >= 0")
        return idx

    ip_idx = col_index("ip", 0)
    sent_idx = col_index("sent", 1)
    received_idx = col_index("received", 2)
    loss_idx = col_index("loss_rate", 3)
    latency_idx = col_index("avg_latency", 4)
    speed_idx = col_index("download_speed", 5)
    region_idx = col_index("region", 6)

    def cell(row, idx):
        # Missing trailing columns degrade to "" rather than raising.
        return row[idx].strip() if idx < len(row) else ""

    out = []
    for row in rows[header_rows:]:
        if ip_idx >= len(row):
            continue
        domain = normalize_domain(row[ip_idx])
        if not domain:
            continue
        out.append(
            {
                "domain": domain,
                # Same normalized value as "domain"; the raw cfst column
                # text is not preserved.
                "ip": domain,
                "sent": cell(row, sent_idx),
                "received": cell(row, received_idx),
                "loss_rate": cell(row, loss_idx),
                "avg_latency": cell(row, latency_idx),
                "download_speed": cell(row, speed_idx),
                "region": cell(row, region_idx),
            }
        )

    if not out:
        raise RuntimeError("cfst result parsed to zero valid rows")
    return out
|
|
|
+
|
|
|
+
|
|
|
def flatten_values(value):
|
|
|
out = []
|
|
|
if isinstance(value, str):
|
|
|
@@ -230,6 +340,40 @@ def extract_records(payload, record_mapping):
|
|
|
|
|
|
|
|
|
def validate_config(cfg):
|
|
|
+ source_type = get_source_type(cfg)
|
|
|
+ if source_type not in {"api", "cfst_local"}:
|
|
|
+ raise ValueError("source.type must be 'api' or 'cfst_local'")
|
|
|
+
|
|
|
+ output_cfg = cfg.get("output", {})
|
|
|
+ if output_cfg and not isinstance(output_cfg, dict):
|
|
|
+ raise ValueError("output must be an object")
|
|
|
+
|
|
|
+ if source_type == "cfst_local":
|
|
|
+ cfst_cfg = cfg.get("cfst_local")
|
|
|
+ if not isinstance(cfst_cfg, dict):
|
|
|
+ raise ValueError("cfst_local is required and must be an object when source.type=cfst_local")
|
|
|
+
|
|
|
+ work_dir = str(cfst_cfg.get("work_dir", "")).strip()
|
|
|
+ if not work_dir:
|
|
|
+ raise ValueError("cfst_local.work_dir is required")
|
|
|
+
|
|
|
+ binary = str(cfst_cfg.get("binary", "")).strip()
|
|
|
+ if not binary:
|
|
|
+ raise ValueError("cfst_local.binary is required")
|
|
|
+
|
|
|
+ result_file = str(cfst_cfg.get("result_file", "")).strip()
|
|
|
+ if not result_file:
|
|
|
+ raise ValueError("cfst_local.result_file is required")
|
|
|
+
|
|
|
+ run_args = cfst_cfg.get("run_args", [])
|
|
|
+ if not isinstance(run_args, list):
|
|
|
+ raise ValueError("cfst_local.run_args must be an array")
|
|
|
+
|
|
|
+ columns_cfg = cfst_cfg.get("columns", {})
|
|
|
+ if columns_cfg and not isinstance(columns_cfg, dict):
|
|
|
+ raise ValueError("cfst_local.columns must be an object")
|
|
|
+ return
|
|
|
+
|
|
|
record_mapping = cfg.get("record_mapping")
|
|
|
if not isinstance(record_mapping, dict):
|
|
|
raise ValueError("record_mapping is required and must be an object")
|
|
|
@@ -766,9 +910,41 @@ def choose_domain(filtered_domains, check_results, top_n, ranked_scored):
|
|
|
return None, []
|
|
|
|
|
|
|
|
|
def build_output_settings(output_cfg, config_path_abs):
    """Resolve output file locations and key names from *output_cfg*.

    The runtime directory is resolved relative to the directory that
    contains the config file.  Legacy key names (current_domain_file,
    current_domain_json, substore_vars_file) are honored as fallbacks
    for their newer counterparts.
    """
    base_dir = os.path.dirname(config_path_abs)
    runtime_dir = resolve_path(base_dir, output_cfg.get("runtime_dir", "./runtime"))

    def in_runtime(file_name):
        # All output files live under the runtime directory.
        return os.path.join(runtime_dir, file_name)

    def key_or(name, fallback):
        # Trimmed config value, or the fallback when missing/blank.
        return str(output_cfg.get(name, fallback)).strip() or fallback

    text_name = output_cfg.get(
        "selected_value_file", output_cfg.get("current_domain_file", "current_domain.txt")
    )
    json_name = output_cfg.get(
        "selected_value_json", output_cfg.get("current_domain_json", "current_domain.json")
    )
    vars_name = output_cfg.get(
        "export_vars_file", output_cfg.get("substore_vars_file", "substore_vars.json")
    )

    return {
        "runtime_dir": runtime_dir,
        "selected_text_path": in_runtime(text_name),
        "selected_json_path": in_runtime(json_name),
        "state_path": in_runtime(output_cfg.get("state_file", "state.json")),
        "vars_path": in_runtime(vars_name),
        "selected_json_key": key_or("selected_value_json_key", "domain"),
        "state_last_good_key": key_or("state_last_good_key", "last_good_domain"),
        "vars_value_key": key_or("substore_value_key", "AUTO_DOMAIN"),
    }
|
|
|
+
|
|
|
+
|
|
|
def print_output_settings(config_path_abs, cfg):
    """Print the resolved output settings for *cfg* as one JSON line."""
    settings = build_output_settings(cfg.get("output", {}), config_path_abs)
    print(json.dumps(settings, ensure_ascii=True))
|
|
|
+
|
|
|
+
|
|
|
def main():
|
|
|
ap = argparse.ArgumentParser(description="Auto select VMess preferred domain")
|
|
|
- ap.add_argument("--config", default="config.json", help="Path to config JSON")
|
|
|
+ ap.add_argument("--config", default="config.server.json", help="Path to config JSON")
|
|
|
+ ap.add_argument(
|
|
|
+ "--print-output-settings",
|
|
|
+ action="store_true",
|
|
|
+ help="Print resolved output settings as JSON and exit",
|
|
|
+ )
|
|
|
args = ap.parse_args()
|
|
|
|
|
|
config_path_abs = os.path.abspath(args.config)
|
|
|
@@ -784,72 +960,97 @@ def main():
|
|
|
print(json.dumps({"status": "error", "error": f"invalid config: {e}"}, ensure_ascii=True), file=sys.stderr)
|
|
|
sys.exit(1)
|
|
|
|
|
|
+ if args.print_output_settings:
|
|
|
+ print_output_settings(config_path_abs, cfg)
|
|
|
+ return
|
|
|
+
|
|
|
output_cfg = cfg.get("output", {})
|
|
|
- runtime_dir_cfg = output_cfg.get("runtime_dir", "./runtime")
|
|
|
- if os.path.isabs(runtime_dir_cfg):
|
|
|
- runtime_dir = runtime_dir_cfg
|
|
|
- else:
|
|
|
- runtime_dir = os.path.normpath(os.path.join(os.path.dirname(config_path_abs), runtime_dir_cfg))
|
|
|
+ output_settings = build_output_settings(output_cfg, config_path_abs)
|
|
|
v2_cfg = cfg.get("v2ray", {})
|
|
|
notify_cfg = cfg.get("notify", {})
|
|
|
-
|
|
|
- current_domain_file = os.path.join(runtime_dir, output_cfg.get("current_domain_file", "current_domain.txt"))
|
|
|
- current_domain_json = os.path.join(runtime_dir, output_cfg.get("current_domain_json", "current_domain.json"))
|
|
|
- state_file = os.path.join(runtime_dir, output_cfg.get("state_file", "state.json"))
|
|
|
- substore_vars_file = os.path.join(runtime_dir, output_cfg.get("substore_vars_file", "substore_vars.json"))
|
|
|
+ selected_text_file = output_settings["selected_text_path"]
|
|
|
+ selected_json_file = output_settings["selected_json_path"]
|
|
|
+ state_file = output_settings["state_path"]
|
|
|
+ vars_file = output_settings["vars_path"]
|
|
|
+ selected_json_key = output_settings["selected_json_key"]
|
|
|
+ state_last_good_key = output_settings["state_last_good_key"]
|
|
|
+ vars_value_key = output_settings["vars_value_key"]
|
|
|
|
|
|
state = read_json_file(state_file, default={})
|
|
|
- last_good = state.get("last_good_domain", "")
|
|
|
+ last_good = state.get(state_last_good_key, "")
|
|
|
+ source_type = get_source_type(cfg)
|
|
|
|
|
|
try:
|
|
|
- payload = fetch_api_json(cfg)
|
|
|
- parsed = parse_domains(payload, cfg.get("parser", {}))
|
|
|
- filtered = apply_filter(parsed, cfg.get("domain_filter", {}))
|
|
|
+ top_n = int(cfg.get("selection", {}).get("top_n", 3))
|
|
|
+ check_results = []
|
|
|
+ payload = None
|
|
|
+
|
|
|
+ if source_type == "cfst_local":
|
|
|
+ cfst_rows = load_cfst_rows(cfg, config_path_abs)
|
|
|
+ parsed = [row["domain"] for row in cfst_rows]
|
|
|
+ filtered = apply_filter(parsed, cfg.get("domain_filter", {}))
|
|
|
+ filtered_set = set(filtered)
|
|
|
+ cfst_rows = [row for row in cfst_rows if row["domain"] in filtered_set]
|
|
|
+ if not cfst_rows:
|
|
|
+ raise RuntimeError("No valid IP available from cfst result after filtering")
|
|
|
+
|
|
|
+ if cfg.get("healthcheck", {}).get("enabled", False):
|
|
|
+ check_results = check_domains(filtered, cfg.get("healthcheck", {}))
|
|
|
+ selected, _ = choose_domain(filtered, check_results, top_n, [])
|
|
|
+ top_candidates = cfst_rows[:top_n]
|
|
|
+ else:
|
|
|
+ selected = cfst_rows[0]["domain"]
|
|
|
+ top_candidates = cfst_rows[:top_n]
|
|
|
+ else:
|
|
|
+ payload = fetch_api_json(cfg)
|
|
|
+ parsed = parse_domains(payload, cfg.get("parser", {}))
|
|
|
+ filtered = apply_filter(parsed, cfg.get("domain_filter", {}))
|
|
|
|
|
|
- record_mapping_cfg = cfg.get("record_mapping", {})
|
|
|
- field_map = record_mapping_cfg.get("field_map", {})
|
|
|
- records = extract_records(payload, record_mapping_cfg)
|
|
|
+ record_mapping_cfg = cfg.get("record_mapping", {})
|
|
|
+ field_map = record_mapping_cfg.get("field_map", {})
|
|
|
+ records = extract_records(payload, record_mapping_cfg)
|
|
|
|
|
|
- record_filter_cfg = cfg.get("record_filter", {})
|
|
|
- blocked_domains = collect_excluded_domains(records, field_map, record_filter_cfg)
|
|
|
- if blocked_domains:
|
|
|
- filtered = [d for d in filtered if d not in blocked_domains]
|
|
|
+ record_filter_cfg = cfg.get("record_filter", {})
|
|
|
+ blocked_domains = collect_excluded_domains(records, field_map, record_filter_cfg)
|
|
|
+ if blocked_domains:
|
|
|
+ filtered = [d for d in filtered if d not in blocked_domains]
|
|
|
|
|
|
- scoring_cfg = cfg.get("scoring", {})
|
|
|
- scored_records = parse_scored_records(records, field_map, record_mapping_cfg, scoring_cfg)
|
|
|
- filtered_set = set(filtered)
|
|
|
- scored_records = [r for r in scored_records if r["domain"] in filtered_set]
|
|
|
- ranked_scored = rank_scored_records(scored_records, scoring_cfg)
|
|
|
+ scoring_cfg = cfg.get("scoring", {})
|
|
|
+ scored_records = parse_scored_records(records, field_map, record_mapping_cfg, scoring_cfg)
|
|
|
+ filtered_set = set(filtered)
|
|
|
+ scored_records = [r for r in scored_records if r["domain"] in filtered_set]
|
|
|
+ ranked_scored = rank_scored_records(scored_records, scoring_cfg)
|
|
|
|
|
|
- check_results = []
|
|
|
- if cfg.get("healthcheck", {}).get("enabled", True):
|
|
|
- check_results = check_domains(filtered, cfg.get("healthcheck", {}))
|
|
|
+ if cfg.get("healthcheck", {}).get("enabled", True):
|
|
|
+ check_results = check_domains(filtered, cfg.get("healthcheck", {}))
|
|
|
|
|
|
- top_n = int(cfg.get("selection", {}).get("top_n", 3))
|
|
|
- selected, top_candidates = choose_domain(filtered, check_results, top_n, ranked_scored)
|
|
|
+ selected, top_candidates = choose_domain(filtered, check_results, top_n, ranked_scored)
|
|
|
|
|
|
status = "ok"
|
|
|
if not selected and last_good:
|
|
|
selected = last_good
|
|
|
status = "fallback_last_good"
|
|
|
if not selected:
|
|
|
+ if source_type == "cfst_local":
|
|
|
+ raise RuntimeError("No valid IP available from cfst and no fallback in state")
|
|
|
raise RuntimeError("No valid domain available from API and no fallback in state")
|
|
|
|
|
|
- write_text_file(current_domain_file, selected + "\n")
|
|
|
+ write_text_file(selected_text_file, selected + "\n")
|
|
|
|
|
|
current_json = {
|
|
|
- "domain": selected,
|
|
|
+ selected_json_key: selected,
|
|
|
"updated_at": utc_now_iso(),
|
|
|
"status": status,
|
|
|
+ "source_type": source_type,
|
|
|
"source_count": len(parsed),
|
|
|
"checked_count": len(check_results),
|
|
|
"top_candidates": top_candidates,
|
|
|
}
|
|
|
- write_json_file(current_domain_json, current_json)
|
|
|
+ write_json_file(selected_json_file, current_json)
|
|
|
write_json_file(
|
|
|
- substore_vars_file,
|
|
|
+ vars_file,
|
|
|
{
|
|
|
- "AUTO_DOMAIN": selected,
|
|
|
+ vars_value_key: selected,
|
|
|
"UPDATED_AT": current_json["updated_at"],
|
|
|
"STATUS": status,
|
|
|
},
|
|
|
@@ -864,11 +1065,12 @@ def main():
|
|
|
|
|
|
new_state = {
|
|
|
"updated_at": current_json["updated_at"],
|
|
|
- "last_good_domain": selected,
|
|
|
+ state_last_good_key: selected,
|
|
|
"status": status,
|
|
|
"source_count": len(parsed),
|
|
|
"checked_count": len(check_results),
|
|
|
"rendered_v2ray": rendered,
|
|
|
+ "source_type": source_type,
|
|
|
}
|
|
|
write_json_file(state_file, new_state)
|
|
|
|
|
|
@@ -881,19 +1083,21 @@ def main():
|
|
|
"updated_at": now,
|
|
|
"status": "error",
|
|
|
"error": str(e),
|
|
|
- "last_good_domain": last_good,
|
|
|
+ state_last_good_key: last_good,
|
|
|
+ "source_type": source_type,
|
|
|
}
|
|
|
write_json_file(state_file, err_state)
|
|
|
|
|
|
if last_good:
|
|
|
- write_text_file(current_domain_file, last_good + "\n")
|
|
|
+ write_text_file(selected_text_file, last_good + "\n")
|
|
|
write_json_file(
|
|
|
- current_domain_json,
|
|
|
+ selected_json_file,
|
|
|
{
|
|
|
- "domain": last_good,
|
|
|
+ selected_json_key: last_good,
|
|
|
"updated_at": now,
|
|
|
"status": "error_use_last_good",
|
|
|
"error": str(e),
|
|
|
+ "source_type": source_type,
|
|
|
},
|
|
|
)
|
|
|
run_notify(notify_cfg.get("command", ""), last_good, "error_use_last_good")
|