# Source: strategy32/scripts/run_soft_router_search.py
# (320 lines, 14 KiB, Python)
from __future__ import annotations
import itertools
import json
import sys
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import slice_bundle
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
from strategy32.research.hybrid_regime import _curve_returns, _run_adverse_component_curve
from strategy32.research.soft_router import (
WINDOWS,
YEAR_PERIODS,
YTD_START,
SoftRouterCandidate,
build_regime_score_frame,
compose_soft_router_curve,
evaluate_candidate_exact,
load_component_bundle,
score_candidate,
segment_metrics,
)
from strategy32.research.hybrid_regime import STATIC_FILTERS
# Output artifacts: machine-readable search results and the markdown report.
OUT_JSON = Path("/tmp/strategy32_soft_router_search.json")
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/015_soft_router_탐색결과.md")
# Grid axes for the candidate search. Their order below matches the positional
# field order of SoftRouterCandidate (see the itertools.product call in main()).
PROFILES = ("loose_positive",)
CORE_FILTERS = ("overheat_tolerant", "prev_balanced")
CAP_ENGINES = ("cap_btc_rebound",)
CHOP_ENGINES = ("chop_inverse_carry", "chop_inverse_carry_strict")
DIST_ENGINES = ("dist_inverse_carry_strict",)
# Weight-formula parameters swept per candidate.
CORE_FLOORS = (0.00, 0.10, 0.20)
CAP_MAX_WEIGHTS = (0.20, 0.35, 0.50)
CHOP_MAX_WEIGHTS = (0.10, 0.20, 0.35)
DIST_MAX_WEIGHTS = (0.10, 0.20, 0.35)
CHOP_BLEND_FLOORS = (0.00, 0.10, 0.20)
def _evaluate_from_curve(
    curve: pd.Series, latest_bar: pd.Timestamp
) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Compute window/year metrics for an equity curve and score the candidate.

    Returns ``(window_results, year_results, score, negative_years,
    mdd_violations)``. The ``2026_YTD`` segment is reported in
    ``year_results`` but deliberately excluded from scoring.
    """
    window_results: dict[str, dict[str, float]] = {}
    for lookback_days, window_label in WINDOWS:
        window_start = latest_bar - pd.Timedelta(days=lookback_days)
        window_results[window_label] = segment_metrics(curve, window_start, latest_bar)

    year_results: dict[str, dict[str, float]] = {}
    for year_label, period_start, period_end_exclusive in YEAR_PERIODS:
        # Clamp the inclusive period end to the latest completed bar.
        period_end = min(latest_bar, period_end_exclusive - pd.Timedelta(seconds=1))
        year_results[year_label] = segment_metrics(curve, period_start, period_end)
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)

    scored_years = {
        label: metrics for label, metrics in year_results.items() if label != "2026_YTD"
    }
    score, negative_years, mdd_violations = score_candidate(window_results, scored_years)
    return window_results, year_results, score, negative_years, mdd_violations
def _run_static_segment(
    bundle, filter_name: str, trade_start: pd.Timestamp, eval_end: pd.Timestamp
) -> pd.Series:
    """Backtest the static core engine over one segment.

    Slices the bundle with a 90-day warmup before ``trade_start``, runs the
    v7-default config with the given static filter, and returns the equity
    curve restricted to bars at or after ``trade_start``.
    """
    raw_start = trade_start - pd.Timedelta(days=90)
    # slice_bundle is already imported at module level; the original re-imported
    # it inside each loop, which was redundant.
    sliced = slice_bundle(bundle, raw_start, eval_end)
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
    backtester = Strategy32Backtester(cfg, sliced, trade_start=trade_start)
    # Normalize capital so segment returns are comparable across segments.
    backtester.engine_config.initial_capital = 1000.0
    return backtester.run().equity_curve.loc[lambda s: s.index >= trade_start]


def _exact_static_variant(bundle, latest_bar: pd.Timestamp, filter_name: str) -> dict[str, object]:
    """Exactly evaluate a static core filter (no router) as a baseline.

    Each window, calendar year, and the 2026 YTD segment is backtested
    independently via :func:`_run_static_segment`; the 2026 YTD segment is
    reported but excluded from scoring. Returns a result dict in the same
    shape as the soft-router candidate results.
    """
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        eval_start = latest_bar - pd.Timedelta(days=days)
        curve = _run_static_segment(bundle, filter_name, eval_start, latest_bar)
        window_results[label] = segment_metrics(curve, eval_start, latest_bar)
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp the inclusive year end to the latest completed bar.
        eval_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        curve = _run_static_segment(bundle, filter_name, start, eval_end)
        year_results[label] = segment_metrics(curve, start, eval_end)
    curve = _run_static_segment(bundle, filter_name, YTD_START, latest_bar)
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    score, negative_years, mdd_violations = score_candidate(
        window_results,
        {k: v for k, v in year_results.items() if k != "2026_YTD"},
    )
    return {
        "name": filter_name,
        "windows": window_results,
        "years": year_results,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "validation": "exact_static_variant",
    }
def main() -> None:
    """Run the soft-router grid search and write JSON + markdown reports.

    Phases: (1) cache per-profile regime scores and component return series,
    (2) approximately evaluate the full candidate grid from cached curves,
    (3) exactly re-validate the top 3 candidates, (4) exactly evaluate the
    static core filters as baselines, (5) write OUT_JSON and OUT_MD.
    """
    bundle, latest_bar = load_component_bundle()
    # 1825 days = 5-year evaluation window; 90 extra days of raw data for warmup.
    eval_start = latest_bar - pd.Timedelta(days=1825)
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    # Per-profile cache so the candidate grid can be scored without re-running
    # any backtest per candidate.
    precomputed: dict[str, object] = {"profiles": {}}
    for profile_name in PROFILES:
        score_frame = build_regime_score_frame(sliced, eval_start, latest_bar, profile_name=profile_name)
        harness = AdverseRegimeResearchHarness(sliced, latest_bar)
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        core_returns: dict[str, pd.Series] = {}
        adverse_returns: dict[str, pd.Series] = {}
        # Cache one equity-return series per static core filter.
        for core_filter in CORE_FILTERS:
            cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[core_filter])
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            core_curve = backtester.run().equity_curve.loc[lambda s: s.index >= eval_start]
            core_returns[core_filter] = _curve_returns(core_curve)
            print(f"[cache core] {profile_name}|{core_filter}", flush=True)
        # Cache one return series per distinct adverse overlay engine.
        for engine_name in sorted(set(CAP_ENGINES) | set(CHOP_ENGINES) | set(DIST_ENGINES)):
            adverse_curve = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=score_frame,
            )
            adverse_returns[engine_name] = _curve_returns(adverse_curve)
            print(f"[cache adverse] {profile_name}|{engine_name}", flush=True)
        precomputed["profiles"][profile_name] = {
            "score_frame": score_frame,
            "timestamps": timestamps,
            "core_returns": core_returns,
            "adverse_returns": adverse_returns,
        }
    # Full cartesian grid; axis order must match SoftRouterCandidate's fields.
    candidates = [
        SoftRouterCandidate(*combo)
        for combo in itertools.product(
            PROFILES,
            CORE_FILTERS,
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
            CORE_FLOORS,
            CAP_MAX_WEIGHTS,
            CHOP_MAX_WEIGHTS,
            DIST_MAX_WEIGHTS,
            CHOP_BLEND_FLOORS,
        )
    ]
    # Phase 2: approximate evaluation of every candidate from cached returns.
    approx_rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        profile_cache = precomputed["profiles"][candidate.regime_profile]
        components = {
            "timestamps": profile_cache["timestamps"],
            "score_frame": profile_cache["score_frame"],
            "core_returns": profile_cache["core_returns"][candidate.core_filter],
            "cap_returns": profile_cache["adverse_returns"][candidate.cap_engine],
            "chop_returns": profile_cache["adverse_returns"][candidate.chop_engine],
            "dist_returns": profile_cache["adverse_returns"][candidate.dist_engine],
        }
        curve, weights = compose_soft_router_curve(candidate=candidate, **components)
        window_results, year_results, score, negative_years, mdd_violations = _evaluate_from_curve(curve, latest_bar)
        approx_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "windows": window_results,
                "years": year_results,
                # Mean allocation per sleeve over the evaluation window.
                "avg_weights": {
                    "core": float(weights["core_weight"].mean()),
                    "cap": float(weights["cap_weight"].mean()),
                    "chop": float(weights["chop_weight"].mean()),
                    "dist": float(weights["dist_weight"].mean()),
                    "cash": float(weights["cash_weight"].mean()),
                },
                "validation": "approx_full_curve_slice",
            }
        )
        # Progress heartbeat every 100 candidates (and on the final one).
        # NOTE(review): "top=" prints the most recent row, not the best-scored one.
        if idx % 100 == 0 or idx == len(candidates):
            print(
                f"[approx {idx:04d}/{len(candidates)}] top={approx_rows[-1]['name']} "
                f"1y={window_results['1y']['total_return'] * 100:.2f}% "
                f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
                flush=True,
            )
    # Rank: fewest negative years, then fewest MDD violations, then best score.
    approx_rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    # Phase 3: exact re-validation of the top 3 approximate candidates.
    exact_top = []
    for row in approx_rows[:3]:
        candidate = SoftRouterCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        result = evaluate_candidate_exact(bundle=bundle, latest_bar=latest_bar, candidate=candidate)
        exact_top.append(result)
        # NOTE(review): re-sorts after each append (harmless for 3 items) —
        # placement reconstructed from source line order; confirm intent.
        exact_top.sort(key=lambda item: (int(item["negative_years"]), int(item["mdd_violations"]), -float(item["score"])))
        print(
            f"[exact] {candidate.name} 1y={result['windows']['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={result['windows']['5y']['annualized_return'] * 100:.2f}% "
            f"neg={result['negative_years']} mdd_viol={result['mdd_violations']}",
            flush=True,
        )
    # Phase 4: static-core baselines for comparison against the router.
    static_exact = [_exact_static_variant(bundle, latest_bar, filter_name) for filter_name in CORE_FILTERS]
    # Phase 5: persist results.
    payload = {
        "analysis": "strategy32_soft_router_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "component_cache_count": sum(
            len(profile_cache["core_returns"]) + len(profile_cache["adverse_returns"])
            for profile_cache in precomputed["profiles"].values()
        ),
        "summary": approx_rows[:20],
        "exact_top": exact_top,
        "exact_static": static_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    # Markdown report (section strings are intentionally in Korean).
    lines = [
        "# Strategy32 Soft Router 탐색결과",
        "",
        "## 1. 목적",
        "",
        "`5개 하드 레짐 -> 1엔진 선택` 구조를 버리고, `정적 코어 엔진 + adverse overlay` 구조를 연속형 점수 기반으로 탐색한다.",
        "",
        "## 2. 탐색 범위",
        "",
        f"- profiles: `{', '.join(PROFILES)}`",
        f"- core filters: `{', '.join(CORE_FILTERS)}`",
        f"- cap engines: `{', '.join(CAP_ENGINES)}`",
        f"- chop engines: `{', '.join(CHOP_ENGINES)}`",
        f"- dist engines: `{', '.join(DIST_ENGINES)}`",
        f"- total candidates: `{len(candidates)}`",
        "",
        "## 3. exact 상위 후보",
        "",
        "| rank | candidate | 1y | 2y ann | 3y ann | 4y ann | 5y ann | 5y MDD | 2025 | 2024 |",
        "|---|---|---:|---:|---:|---:|---:|---:|---:|---:|",
    ]
    for idx, row in enumerate(exact_top, start=1):
        lines.append(
            f"| `{idx}` | `{row['name']}` | `{row['windows']['1y']['total_return'] * 100:.2f}%` | "
            f"`{row['windows']['2y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['3y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['4y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['max_drawdown'] * 100:.2f}%` | "
            f"`{row['years']['2025']['total_return'] * 100:.2f}%` | "
            f"`{row['years']['2024']['total_return'] * 100:.2f}%` |"
        )
    lines.extend(
        [
            "",
            "## 4. 정적 코어 exact 비교",
            "",
            "| core filter | 1y | 2y ann | 3y ann | 4y ann | 5y ann | 5y MDD | 2025 | 2024 |",
            "|---|---:|---:|---:|---:|---:|---:|---:|---:|",
        ]
    )
    for row in static_exact:
        lines.append(
            f"| `{row['name']}` | `{row['windows']['1y']['total_return'] * 100:.2f}%` | "
            f"`{row['windows']['2y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['3y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['4y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['max_drawdown'] * 100:.2f}%` | "
            f"`{row['years']['2025']['total_return'] * 100:.2f}%` | "
            f"`{row['years']['2024']['total_return'] * 100:.2f}%` |"
        )
    lines.extend(
        [
            "",
            "## 5. 해석",
            "",
            "- soft router가 정적 코어보다 좋아지려면, adverse overlay가 `2024/2025 방어`를 만들어내면서 `5y CAGR`을 크게 훼손하지 않아야 한다.",
            "- exact 결과가 정적 코어보다 약하면, 현재 adverse overlay 신호 품질 또는 overlay weight 공식이 아직 최적이 아니라는 뜻이다.",
            "",
            "## 6. 원본 결과",
            "",
            f"- JSON: [{OUT_JSON}]({OUT_JSON})",
        ]
    )
    OUT_MD.write_text("\n".join(lines) + "\n", encoding="utf-8")
# Script entry point.
if __name__ == "__main__":
    main()