Initial strategy32 research and live runtime

This commit is contained in:
2026-03-16 20:18:41 -07:00
commit c165a9add7
42 changed files with 10750 additions and 0 deletions

1
scripts/__init__.py Normal file
View File

@@ -0,0 +1 @@
from __future__ import annotations

View File

@@ -0,0 +1,104 @@
from __future__ import annotations
import copy
import json
import sys
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V5_BASELINE, build_strategy32_config
from strategy32.data import build_strategy32_market_bundle
# Evaluation windows as (days, label): 1 month, 1/3/5 years.
WINDOWS = [(30, "1m"), (365, "1y"), (1095, "3y"), (1825, "5y")]
def build_variants() -> list[tuple[str, dict[str, bool]]]:
    """Return the ablation variants as (name, config-override) pairs.

    The first entry is the untouched v5 baseline; every other entry flips
    exactly one feature flag relative to that baseline.
    """
    toggles: dict[str, dict[str, bool]] = {
        "baseline_v5": {},
        "no_sideways": {"enable_sideways_engine": False},
        "strong_kill_switch": {"enable_strong_kill_switch": True},
        "daily_trend_filter": {"enable_daily_trend_filter": True},
        "expanded_hedge": {"enable_expanded_hedge": True},
        "max_holding_exit": {"enable_max_holding_exit": True},
    }
    # dicts preserve insertion order, so the variant order is stable.
    return list(toggles.items())
def main() -> None:
    """Run the v6 single-change ablation study.

    Backtests the v5 baseline plus each one-flag variant over the
    1m/1y/3y/5y windows and writes all metrics to a JSON report under /tmp.
    """
    base = build_strategy32_config(PROFILE_V5_BASELINE)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Fetch enough history to cover the longest window, the warmup, plus a small pad.
    start = end - pd.Timedelta(days=max(days for days, _ in WINDOWS) + base.warmup_days + 14)
    print("fetching bundle...")
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=base.auto_discover_symbols,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=base.discovery_min_quote_volume_24h,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    print("latest", latest_completed_bar)
    results: dict[str, dict[str, dict[str, float | int | str]]] = {}
    for name, overrides in build_variants():
        # Deep-copy the base config so one variant's overrides never leak into the next.
        cfg = copy.deepcopy(base)
        for key, value in overrides.items():
            setattr(cfg, key, value)
        variant_results = {}
        print("\nVARIANT", name)
        for days, label in WINDOWS:
            eval_end = latest_completed_bar
            eval_start = eval_end - pd.Timedelta(days=days)
            # Slice warmup history ahead of the window so indicators are primed,
            # but trading only starts at eval_start.
            raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
            sliced = slice_bundle(bundle, raw_start, eval_end)
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            # Fixed capital keeps returns comparable across variants/windows.
            backtester.engine_config.initial_capital = 1000.0
            result = backtester.run()
            metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
            metrics["engine_pnl"] = result.engine_pnl
            metrics["total_trades"] = result.total_trades
            variant_results[label] = metrics
            print(
                label,
                "ret",
                round(float(metrics["total_return"]) * 100, 2),
                "mdd",
                round(float(metrics["max_drawdown"]) * 100, 2),
                "sharpe",
                round(float(metrics["sharpe"]), 2),
                "trades",
                metrics["trade_count"],
            )
        results[name] = variant_results
    payload = {
        "strategy": "strategy32",
        "analysis": "v6_single_change_ablation",
        "initial_capital": 1000.0,
        "auto_discover_symbols": base.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "requested_symbols": [] if base.auto_discover_symbols else base.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": base.timeframe,
        "results": results,
    }
    out = Path("/tmp/strategy32_v6_ablation.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nwrote", out)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,39 @@
from __future__ import annotations
import json
import sys
from pathlib import Path
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.research.adverse_regime import run_adverse_regime_search
def main() -> None:
    """Run the 5y adverse-regime engine search, save JSON, print a summary."""
    report = run_adverse_regime_search(
        cache_path="/tmp/strategy32_fixed66_bundle.pkl",
        eval_days=1825,
        initial_capital=1000.0,
    )
    out_path = Path("/tmp/strategy32_adverse_regime_engine_search.json")
    out_path.write_text(json.dumps(report, indent=2), encoding="utf-8")
    # Console summary: one header line per regime, one indented line per engine row.
    for regime, rows in report["by_regime"].items():
        print(regime)
        for row in rows:
            fields = (
                f"ret={float(row['total_return']) * 100:.2f}%",
                f"sharpe={float(row['sharpe']):.2f}",
                f"mdd={float(row['max_drawdown']) * 100:.2f}%",
                f"active={float(row['active_bar_ratio']) * 100:.2f}%",
                f"rebalance={int(row['rebalance_count'])}",
            )
            print(" ", row["name"], *fields)
    print(f"wrote {out_path}")


if __name__ == "__main__":
    main()

139
scripts/run_backtest.py Normal file
View File

@@ -0,0 +1,139 @@
from __future__ import annotations
import argparse
import json
import sys
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V5_BASELINE, PROFILE_V7_DEFAULT, build_strategy32_config
from strategy32.data import (
build_strategy32_market_bundle_from_specs,
build_strategy32_price_frames_from_specs,
resolve_strategy32_pair_specs,
)
# Default evaluation windows in days (1y / 3y / 5y).
DEFAULT_WINDOWS = [365, 1095, 1825]
def _slice_price_frames(
prices: dict[str, pd.DataFrame],
start: pd.Timestamp,
end: pd.Timestamp,
) -> dict[str, pd.DataFrame]:
sliced: dict[str, pd.DataFrame] = {}
for symbol, df in prices.items():
frame = df.loc[(df["timestamp"] >= start) & (df["timestamp"] <= end)].copy()
if not frame.empty:
sliced[symbol] = frame.reset_index(drop=True)
return sliced
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the Strategy32 backtest runner."""
    parser = argparse.ArgumentParser(description="Run Strategy32 backtest on Binance data")
    parser.add_argument(
        "--profile",
        default=PROFILE_V7_DEFAULT,
        choices=[PROFILE_V5_BASELINE, PROFILE_V7_DEFAULT],
    )
    # Empty string means "auto-discover"; a CSV list pins the universe.
    parser.add_argument("--symbols", default="")
    default_windows = ",".join(str(days) for days in DEFAULT_WINDOWS)
    parser.add_argument("--windows", default=default_windows)
    parser.add_argument("--warmup-days", type=int, default=90)
    parser.add_argument("--timeframe", default="4h")
    parser.add_argument("--out", default="/tmp/strategy32_backtest_v0.json")
    return parser.parse_args()
def main() -> None:
    """Run Strategy32 backtests over the requested windows and write a JSON report.

    Resolves the tradable universe, downloads the signal-timeframe bundle plus
    the finer execution-refinement price frames, backtests each window, and
    dumps metrics and universe bookkeeping to --out.
    """
    args = parse_args()
    strategy_config = build_strategy32_config(args.profile)
    if args.symbols:
        # An explicit symbol list disables auto-discovery.
        strategy_config.symbols = [symbol.strip().upper() for symbol in args.symbols.split(",") if symbol.strip()]
        strategy_config.auto_discover_symbols = False
    strategy_config.timeframe = args.timeframe
    strategy_config.warmup_days = args.warmup_days
    windows = [int(token.strip()) for token in args.windows.split(",") if token.strip()]
    end = pd.Timestamp.utcnow()
    # Normalize to an aware UTC timestamp regardless of pandas' utcnow() behavior.
    if end.tzinfo is None:
        end = end.tz_localize("UTC")
    else:
        end = end.tz_convert("UTC")
    # Enough history for the longest window, the warmup, plus a small pad.
    start = end - pd.Timedelta(days=max(windows) + strategy_config.warmup_days + 14)
    specs = resolve_strategy32_pair_specs(
        symbols=strategy_config.symbols,
        auto_discover_symbols=strategy_config.auto_discover_symbols,
        quote_assets=strategy_config.quote_assets,
        excluded_base_assets=strategy_config.excluded_base_assets,
        min_quote_volume_24h=strategy_config.discovery_min_quote_volume_24h,
    )
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle_from_specs(
        specs=specs,
        start=start,
        end=end,
        timeframe=strategy_config.timeframe,
        max_staleness_days=strategy_config.max_symbol_staleness_days,
    )
    # Execution refinement data is fetched only for symbols the bundle accepted.
    accepted_specs = [spec for spec in specs if spec.base_symbol in set(accepted_symbols)]
    execution_prices, _, execution_accepted, execution_rejected, _ = build_strategy32_price_frames_from_specs(
        specs=accepted_specs,
        start=start,
        end=end,
        timeframe=strategy_config.execution_refinement_timeframe,
        max_staleness_days=strategy_config.max_symbol_staleness_days,
    )
    results = {}
    for days in windows:
        label = "1y" if days == 365 else "3y" if days == 1095 else "5y" if days == 1825 else f"{days}d"
        eval_end = latest_completed_bar
        eval_start = eval_end - pd.Timedelta(days=days)
        raw_start = eval_start - pd.Timedelta(days=strategy_config.warmup_days)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        # Pad the execution slice by one extra day before the warmup start.
        execution_slice = _slice_price_frames(
            execution_prices,
            raw_start - pd.Timedelta(hours=24),
            eval_end,
        )
        result = Strategy32Backtester(
            strategy_config,
            sliced,
            trade_start=eval_start,
            execution_prices=execution_slice,
        ).run()
        # NOTE(review): bars_per_day is hard-coded to 6 (4h bars) even though
        # --timeframe is configurable — confirm before using other timeframes.
        metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=6)
        metrics["engine_pnl"] = result.engine_pnl
        metrics["total_trades"] = result.total_trades
        metrics["rejection_summary"] = result.metadata.get("rejection_summary", {})
        results[label] = metrics
    payload = {
        "strategy": "strategy32",
        "profile": args.profile,
        "auto_discover_symbols": strategy_config.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "warmup_days": strategy_config.warmup_days,
        "requested_symbols": [] if strategy_config.auto_discover_symbols else strategy_config.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "execution_refinement_timeframe": strategy_config.execution_refinement_timeframe,
        "execution_refinement_symbols": execution_accepted,
        "execution_refinement_rejected": execution_rejected,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": strategy_config.timeframe,
        "results": results,
    }
    target = Path(args.out)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"Wrote {target}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,436 @@
from __future__ import annotations
import itertools
import json
import sys
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import slice_bundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
from strategy32.research.hybrid_regime import STATIC_FILTERS
from strategy32.research.soft_router import (
WINDOWS,
YEAR_PERIODS,
YTD_START,
CashOverlayCandidate,
build_cash_overlay_period_components,
compose_cash_overlay_curve,
evaluate_cash_overlay_exact,
load_component_bundle,
score_candidate,
segment_metrics,
)
# Output artifacts: machine-readable JSON plus a Korean markdown report.
OUT_JSON = Path("/tmp/strategy32_cash_overlay_search.json")
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/017_cash_overlay_탐색결과.md")
# Prior soft-router exact results, included for comparison when present.
SOFT_JSON = Path("/tmp/strategy32_best_soft_exact.json")
# Fixed regime profile / core filter / overlay engines for this search.
PROFILE = "loose_positive"
CORE_FILTER = "overheat_tolerant"
CAP_ENGINE = "cap_btc_rebound"
CHOP_ENGINE = "chop_inverse_carry_strict"
DIST_ENGINE = "dist_inverse_carry_strict"
# Precomputed exact metrics of the static `overheat_tolerant` core, cached here
# so the search does not re-run the expensive exact backtests every invocation.
STATIC_BASELINE = {
    "name": "overheat_tolerant",
    "windows": {
        "1y": {"total_return": 0.1477, "annualized_return": 0.1477, "max_drawdown": -0.1229},
        "2y": {"total_return": 0.2789, "annualized_return": 0.1309, "max_drawdown": -0.1812},
        "3y": {"total_return": 0.4912, "annualized_return": 0.1425, "max_drawdown": -0.1931},
        "4y": {"total_return": 0.3682, "annualized_return": 0.0815, "max_drawdown": -0.3461},
        "5y": {"total_return": 3.7625, "annualized_return": 0.3664, "max_drawdown": -0.2334},
    },
    "years": {
        "2026_YTD": {"total_return": 0.0, "max_drawdown": 0.0},
        "2025": {"total_return": 0.0426, "max_drawdown": -0.1323},
        "2024": {"total_return": 0.1951, "max_drawdown": -0.2194},
        "2023": {"total_return": 0.4670, "max_drawdown": -0.2155},
        "2022": {"total_return": 0.0147, "max_drawdown": -0.0662},
        "2021": {"total_return": 1.9152, "max_drawdown": -0.1258},
    },
}
# Precomputed 5y exposure stats of the core: it sits ~94% in cash on average,
# which is the motivation for overlaying engines on the idle cash only.
EXPOSURE_SUMMARY = {
    "avg_cash_pct": 0.9379,
    "median_cash_pct": 1.0,
    "cash_gt_50_pct": 0.9469,
    "cash_gt_80_pct": 0.9068,
    "avg_momentum_pct": 0.0495,
    "avg_carry_pct": 0.0126,
}
# Grid-search axes: per-engine cash weights and activation thresholds.
CAP_CASH_WEIGHTS = (0.20, 0.35, 0.50, 0.65)
CHOP_CASH_WEIGHTS = (0.10, 0.20, 0.30, 0.40)
DIST_CASH_WEIGHTS = (0.05, 0.10, 0.15, 0.20)
CAP_THRESHOLDS = (0.20, 0.35, 0.50)
CHOP_THRESHOLDS = (0.35, 0.50, 0.65)
DIST_THRESHOLDS = (0.35, 0.50, 0.65)
# Core-score levels above which the overlays are blocked/damped.
CORE_BLOCK_THRESHOLDS = (0.45, 0.60, 0.75)
def _evaluate_from_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Score one composed equity curve over the standard windows and calendar years.

    Returns (window_results, year_results, score, negative_years, mdd_violations).
    """
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        window_results[label] = segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        segment_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        year_results[label] = segment_metrics(curve, start, segment_end)
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    # The incomplete YTD segment is reported but excluded from scoring.
    scored_years = {label: metrics for label, metrics in year_results.items() if label != "2026_YTD"}
    score, negative_years, mdd_violations = score_candidate(window_results, scored_years)
    return window_results, year_results, score, negative_years, mdd_violations
def _run_static_segment(
    bundle,
    filter_name: str,
    trade_start: pd.Timestamp,
    eval_end: pd.Timestamp,
) -> dict[str, float]:
    """Exactly backtest one static-filter segment and return its segment metrics.

    Slices the bundle with a 90-day warmup ahead of trade_start, runs the
    backtest at the fixed 1000.0 initial capital, and evaluates only the
    post-warmup portion of the equity curve.
    """
    raw_start = trade_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, eval_end)
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
    backtester = Strategy32Backtester(cfg, sliced, trade_start=trade_start)
    backtester.engine_config.initial_capital = 1000.0
    curve = backtester.run().equity_curve.loc[lambda s: s.index >= trade_start]
    return segment_metrics(curve, trade_start, eval_end)


def _exact_static_variant(bundle, latest_bar: pd.Timestamp, filter_name: str) -> dict[str, object]:
    """Exactly re-backtest one static filter over all windows and calendar years.

    Previously the slice/config/backtest/evaluate sequence was triplicated for
    windows, years, and YTD; it is now centralized in _run_static_segment.
    Returns the per-window and per-year metrics plus the aggregate score; the
    incomplete 2026 YTD segment is reported but excluded from scoring.
    """
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        eval_start = latest_bar - pd.Timedelta(days=days)
        window_results[label] = _run_static_segment(bundle, filter_name, eval_start, latest_bar)
    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        eval_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        year_results[label] = _run_static_segment(bundle, filter_name, start, eval_end)
    year_results["2026_YTD"] = _run_static_segment(bundle, filter_name, YTD_START, latest_bar)
    score, negative_years, mdd_violations = score_candidate(
        window_results,
        {k: v for k, v in year_results.items() if k != "2026_YTD"},
    )
    return {
        "name": filter_name,
        "windows": window_results,
        "years": year_results,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "validation": "exact_static_variant",
    }
def _core_exposure_summary(bundle, latest_bar: pd.Timestamp) -> dict[str, float]:
    """Measure how much cash the 5y core run actually leaves uninvested.

    Runs the CORE_FILTER configuration over the trailing 5 years (with 90-day
    warmup) and summarizes the per-bar exposure rows recorded in metadata.
    """
    eval_start = latest_bar - pd.Timedelta(days=1825)
    warmup_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, warmup_start, latest_bar)
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[CORE_FILTER])
    backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
    backtester.engine_config.initial_capital = 1000.0
    run_result = backtester.run()
    exposures = pd.DataFrame(run_result.metadata.get("exposure_rows", []))
    # Drop the warmup portion before aggregating.
    exposures = exposures.loc[exposures["timestamp"] >= eval_start].copy()
    cash = exposures["cash_pct"]
    return {
        "avg_cash_pct": float(cash.mean()),
        "median_cash_pct": float(cash.median()),
        "cash_gt_50_pct": float((cash > 0.50).mean()),
        "cash_gt_80_pct": float((cash > 0.80).mean()),
        "avg_momentum_pct": float(exposures["momentum_pct"].mean()),
        "avg_carry_pct": float(exposures["carry_pct"].mean()),
    }
def _metric_line(metrics: dict[str, float], *, include_ann: bool) -> str:
sharpe = metrics.get("sharpe")
if include_ann:
parts = [
f"ret `{metrics['total_return'] * 100:.2f}%`",
f"ann `{metrics['annualized_return'] * 100:.2f}%`",
]
else:
parts = [f"ret `{metrics['total_return'] * 100:.2f}%`"]
if sharpe is not None:
parts.append(f"sharpe `{sharpe:.2f}`")
parts.append(f"mdd `{metrics['max_drawdown'] * 100:.2f}%`")
return ", ".join(parts)
def main() -> None:
    """Grid-search cash-overlay weights/thresholds on top of the static core.

    Approximates every grid candidate from a single 5y component build,
    exact-validates the top five, then writes a JSON payload and a Korean
    markdown report. Baseline metrics are the cached STATIC_BASELINE /
    EXPOSURE_SUMMARY constants rather than a fresh exact run.
    """
    bundle, latest_bar = load_component_bundle()
    eval_start = latest_bar - pd.Timedelta(days=1825)
    # Cached exact baseline + exposure stats (precomputed; see module constants).
    static_exact = STATIC_BASELINE
    exposure_summary = EXPOSURE_SUMMARY
    print("[stage] build 5y overlay components", flush=True)
    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=PROFILE,
        core_filter=CORE_FILTER,
        cap_engine=CAP_ENGINE,
        chop_engine=CHOP_ENGINE,
        dist_engine=DIST_ENGINE,
    )
    print("[stage] begin approximate candidate search", flush=True)
    # Full cartesian product over the weight and threshold grid axes.
    candidates = [
        CashOverlayCandidate(
            regime_profile=PROFILE,
            core_filter=CORE_FILTER,
            cap_engine=CAP_ENGINE,
            chop_engine=CHOP_ENGINE,
            dist_engine=DIST_ENGINE,
            cap_cash_weight=cap_cash_weight,
            chop_cash_weight=chop_cash_weight,
            dist_cash_weight=dist_cash_weight,
            cap_threshold=cap_threshold,
            chop_threshold=chop_threshold,
            dist_threshold=dist_threshold,
            core_block_threshold=core_block_threshold,
        )
        for (
            cap_cash_weight,
            chop_cash_weight,
            dist_cash_weight,
            cap_threshold,
            chop_threshold,
            dist_threshold,
            core_block_threshold,
        ) in itertools.product(
            CAP_CASH_WEIGHTS,
            CHOP_CASH_WEIGHTS,
            DIST_CASH_WEIGHTS,
            CAP_THRESHOLDS,
            CHOP_THRESHOLDS,
            DIST_THRESHOLDS,
            CORE_BLOCK_THRESHOLDS,
        )
    ]
    approx_rows: list[dict[str, object]] = []
    static_1y_ann = float(static_exact["windows"]["1y"]["annualized_return"])
    static_5y_ann = float(static_exact["windows"]["5y"]["annualized_return"])
    static_5y_mdd = float(static_exact["windows"]["5y"]["max_drawdown"])
    for idx, candidate in enumerate(candidates, start=1):
        curve, weights = compose_cash_overlay_curve(candidate=candidate, **components)
        window_results, year_results, score, negative_years, mdd_violations = _evaluate_from_curve(curve, latest_bar)
        # Drawdowns are negative, so ">=" means "drawdown no worse than static".
        beat_static_flags = {
            "1y_ann": float(window_results["1y"]["annualized_return"]) > static_1y_ann,
            "5y_ann": float(window_results["5y"]["annualized_return"]) > static_5y_ann,
            "5y_mdd": float(window_results["5y"]["max_drawdown"]) >= static_5y_mdd,
        }
        approx_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "windows": window_results,
                "years": year_results,
                "avg_weights": {
                    "cap": float(weights["cap_weight"].mean()),
                    "chop": float(weights["chop_weight"].mean()),
                    "dist": float(weights["dist_weight"].mean()),
                    "overlay_total": float(weights["overlay_total"].mean()),
                    "core_cash_pct": float(weights["core_cash_pct"].mean()),
                },
                "beat_static": beat_static_flags,
                "validation": "approx_full_curve_slice_cash_overlay",
            }
        )
        # Progress heartbeat every 500 candidates (and at the very end).
        if idx % 500 == 0 or idx == len(candidates):
            print(
                f"[approx {idx:04d}/{len(candidates)}] "
                f"1y={window_results['1y']['total_return'] * 100:.2f}% "
                f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
                flush=True,
            )
    # Rank: beat-the-static flags first, then fewer bad years / MDD violations,
    # then higher score.
    approx_rows.sort(
        key=lambda row: (
            int(not row["beat_static"]["5y_ann"]),
            int(not row["beat_static"]["1y_ann"]),
            int(row["negative_years"]),
            int(row["mdd_violations"]),
            -float(row["score"]),
        )
    )
    exact_top: list[dict[str, object]] = []
    print("[stage] begin exact validation for top candidates", flush=True)
    # Only the 5 best approximate candidates get the expensive exact re-run.
    for row in approx_rows[:5]:
        candidate = CashOverlayCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        result = evaluate_cash_overlay_exact(bundle=bundle, latest_bar=latest_bar, candidate=candidate)
        exact_top.append(result)
        print(
            f"[exact] {candidate.name} 1y={result['windows']['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={result['windows']['5y']['annualized_return'] * 100:.2f}% "
            f"neg={result['negative_years']} mdd_viol={result['mdd_violations']}",
            flush=True,
        )
    # Re-rank on the exact numbers before picking the winner.
    exact_top.sort(
        key=lambda row: (
            int(float(row["windows"]["5y"]["annualized_return"]) <= static_5y_ann),
            int(float(row["windows"]["1y"]["annualized_return"]) <= static_1y_ann),
            int(row["negative_years"]),
            int(row["mdd_violations"]),
            -float(row["score"]),
        )
    )
    best_exact = exact_top[0]
    soft_exact = json.loads(SOFT_JSON.read_text(encoding="utf-8")) if SOFT_JSON.exists() else None
    payload = {
        "analysis": "strategy32_cash_overlay_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "core_filter": CORE_FILTER,
        "engines": {
            "cap": CAP_ENGINE,
            "chop": CHOP_ENGINE,
            "dist": DIST_ENGINE,
        },
        "exposure_summary": exposure_summary,
        "static_exact": static_exact,
        "summary": approx_rows[:20],
        "exact_top": exact_top,
        "best_exact": best_exact,
        "best_soft_exact": soft_exact,
    }
    print("[stage] write outputs", flush=True)
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    # Markdown report body (Korean); the string content is user-facing output.
    lines = [
        "# Strategy32 Cash Overlay 탐색결과",
        "",
        "## 1. 목적",
        "",
        "정적 core를 줄이던 기존 soft-router를 버리고, `overheat_tolerant` core가 실제로 비워두는 현금 위에만 adverse 엔진을 얹는 cash-overlay 구조를 탐색한다.",
        "",
        "## 2. 왜 구조를 바꿨는가",
        "",
        f"- core `overheat_tolerant` 5y 평균 현금 비중: `{exposure_summary['avg_cash_pct'] * 100:.2f}%`",
        f"- core 중앙값 현금 비중: `{exposure_summary['median_cash_pct'] * 100:.2f}%`",
        f"- 현금 비중 `> 50%` 바 비율: `{exposure_summary['cash_gt_50_pct'] * 100:.2f}%`",
        f"- 현금 비중 `> 80%` 바 비율: `{exposure_summary['cash_gt_80_pct'] * 100:.2f}%`",
        "",
        "즉 기존 soft-router는 이미 대부분 현금인 core를 또 줄이고 있었다. overlay는 core를 대체하는 게 아니라, core가 실제로 안 쓰는 현금에만 들어가야 맞다.",
        "",
        "## 3. 탐색 범위",
        "",
        f"- profile: `{PROFILE}`",
        f"- core filter: `{CORE_FILTER}`",
        f"- cap engine: `{CAP_ENGINE}`",
        f"- chop engine: `{CHOP_ENGINE}`",
        f"- dist engine: `{DIST_ENGINE}`",
        f"- cap cash weights: `{CAP_CASH_WEIGHTS}`",
        f"- chop cash weights: `{CHOP_CASH_WEIGHTS}`",
        f"- dist cash weights: `{DIST_CASH_WEIGHTS}`",
        f"- cap thresholds: `{CAP_THRESHOLDS}`",
        f"- chop thresholds: `{CHOP_THRESHOLDS}`",
        f"- dist thresholds: `{DIST_THRESHOLDS}`",
        f"- core block thresholds: `{CORE_BLOCK_THRESHOLDS}`",
        f"- candidate count: `{len(candidates)}`",
        "",
        "## 4. 정적 core exact 기준선",
        "",
        f"- 1y: {_metric_line(static_exact['windows']['1y'], include_ann=False)}",
        f"- 2y: {_metric_line(static_exact['windows']['2y'], include_ann=True)}",
        f"- 3y: {_metric_line(static_exact['windows']['3y'], include_ann=True)}",
        f"- 4y: {_metric_line(static_exact['windows']['4y'], include_ann=True)}",
        f"- 5y: {_metric_line(static_exact['windows']['5y'], include_ann=True)}",
        f"- 2026 YTD: {_metric_line(static_exact['years']['2026_YTD'], include_ann=False)}",
        f"- 2025: {_metric_line(static_exact['years']['2025'], include_ann=False)}",
        f"- 2024: {_metric_line(static_exact['years']['2024'], include_ann=False)}",
        f"- 2023: {_metric_line(static_exact['years']['2023'], include_ann=False)}",
        f"- 2022: {_metric_line(static_exact['years']['2022'], include_ann=False)}",
        f"- 2021: {_metric_line(static_exact['years']['2021'], include_ann=False)}",
        "",
        "## 5. cash-overlay exact 상위 후보",
        "",
    ]
    # One markdown section per exact-validated candidate.
    for idx, row in enumerate(exact_top, start=1):
        candidate = row["candidate"]
        lines.extend(
            [
                f"### {idx}. {row['name']}",
                "",
                f"- weights: `cap {candidate['cap_cash_weight']:.2f}`, `chop {candidate['chop_cash_weight']:.2f}`, `dist {candidate['dist_cash_weight']:.2f}`",
                f"- thresholds: `cap {candidate['cap_threshold']:.2f}`, `chop {candidate['chop_threshold']:.2f}`, `dist {candidate['dist_threshold']:.2f}`, `block {candidate['core_block_threshold']:.2f}`",
                f"- 1y: {_metric_line(row['windows']['1y'], include_ann=False)}",
                f"- 2y: {_metric_line(row['windows']['2y'], include_ann=True)}",
                f"- 3y: {_metric_line(row['windows']['3y'], include_ann=True)}",
                f"- 4y: {_metric_line(row['windows']['4y'], include_ann=True)}",
                f"- 5y: {_metric_line(row['windows']['5y'], include_ann=True)}",
                f"- 2026 YTD: {_metric_line(row['years']['2026_YTD'], include_ann=False)}",
                f"- 2025: {_metric_line(row['years']['2025'], include_ann=False)}",
                f"- 2024: {_metric_line(row['years']['2024'], include_ann=False)}",
                f"- 2023: {_metric_line(row['years']['2023'], include_ann=False)}",
                f"- 2022: {_metric_line(row['years']['2022'], include_ann=False)}",
                f"- 2021: {_metric_line(row['years']['2021'], include_ann=False)}",
                f"- score `{row['score']:.3f}`, negative years `{row['negative_years']}`, mdd violations `{row['mdd_violations']}`",
                "",
            ]
        )
    lines.extend(
        [
            "## 6. 결론",
            "",
            (
                "cash-overlay가 정적 core보다 나아졌는지는 `best_exact`와 `static_exact` 비교로 판단한다. "
                "핵심 비교 포인트는 `1y`, `5y annualized`, `5y MDD`, 그리고 `2025/2024`의 음수 여부다."
            ),
            "",
            f"- best cash-overlay 1y: `{best_exact['windows']['1y']['total_return'] * 100:.2f}%` vs static `{static_exact['windows']['1y']['total_return'] * 100:.2f}%`",
            f"- best cash-overlay 5y ann: `{best_exact['windows']['5y']['annualized_return'] * 100:.2f}%` vs static `{static_exact['windows']['5y']['annualized_return'] * 100:.2f}%`",
            f"- best cash-overlay 5y MDD: `{best_exact['windows']['5y']['max_drawdown'] * 100:.2f}%` vs static `{static_exact['windows']['5y']['max_drawdown'] * 100:.2f}%`",
            "",
        ]
    )
    # Optional comparison section against the previous soft-router result.
    if soft_exact:
        lines.extend(
            [
                "## 7. 기존 replacement soft-router와 비교",
                "",
                f"- previous soft 1y: `{soft_exact['windows']['1y']['total_return'] * 100:.2f}%`",
                f"- previous soft 5y ann: `{soft_exact['windows']['5y']['annualized_return'] * 100:.2f}%`",
                f"- previous soft 5y MDD: `{soft_exact['windows']['5y']['max_drawdown'] * 100:.2f}%`",
                "",
            ]
        )
    OUT_MD.write_text("\n".join(lines), encoding="utf-8")
    print("[done] cash overlay search complete", flush=True)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,169 @@
from __future__ import annotations
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
BASELINE_PATH,
BEST_CASH_OVERLAY,
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
OUT_JSON as SEARCH_OUT_JSON,
RELAXED_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
HybridSwitchCandidate,
_compose_hybrid_curve,
)
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate, segment_metrics
OUT_JSON = Path("/tmp/strategy32_current_relaxed_hybrid_exact.json")
# Fallback switch candidate, used when the search output JSON is missing
# or contains no `search_top` entries.
BEST_SEARCH_CANDIDATE = HybridSwitchCandidate(
    positive_regimes=("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
    core_score_min=0.60,
    breadth_persist_min=0.50,
    funding_persist_min=0.55,
    panic_max=0.20,
    choppy_max=0.40,
    distribution_max=0.30,
)
def _baseline_summary() -> dict[str, object]:
    """Load the cached current/relaxed overheat baseline results from disk."""
    baseline = json.loads(BASELINE_PATH.read_text(encoding="utf-8"))
    wanted = ("current_overheat", "relaxed_overheat")
    return {name: baseline["variants"][name]["results"] for name in wanted}
def _period_specs(latest_bar: pd.Timestamp) -> list[tuple[str, str, pd.Timestamp, pd.Timestamp]]:
    """Enumerate (kind, label, start, end) evaluation periods.

    Rolling windows come first, then the calendar years (clamped to the latest
    bar), then the 2026 YTD segment.
    """
    window_specs = [
        ("window", label, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    ]
    year_specs = [
        ("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    ]
    return window_specs + year_specs + [("year", "2026_YTD", YTD_START, latest_bar)]
def _period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Exact-evaluate one period inside a worker process.

    Arguments arrive as plain strings/dicts so they pickle cheaply across the
    process boundary; the bundle is re-loaded from the cache file in-process.
    Returns (kind, label, metrics, latest_rows) where latest_rows is non-empty
    only for the 2026_YTD period (last five hybrid switch rows).
    """
    bundle, _ = load_component_bundle(cache_path)
    candidate = HybridSwitchCandidate(**candidate_payload)
    start = pd.Timestamp(start_text)
    end = pd.Timestamp(end_text)
    # Build overlay components twice for the same period: one per core variant.
    current = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=start,
        eval_end=end,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
    )
    relaxed = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=start,
        eval_end=end,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
    )
    curve, rows = _compose_hybrid_curve(
        current_components=current,
        relaxed_components=relaxed,
        switch_candidate=candidate,
    )
    latest_rows: list[dict[str, object]] = []
    if label == "2026_YTD":
        # Keep the last 5 rows with stringified timestamps for live sanity checks.
        latest_rows = rows.tail(5).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")
    return kind, label, segment_metrics(curve, start, end), latest_rows
def main() -> None:
    """Exact-validate the best hybrid switch candidate across all periods.

    Evaluates every window/year period in parallel worker processes, scores
    the combined results, and writes a JSON report.
    """
    # Prefer the best candidate from the prior search output when available;
    # otherwise fall back to the hard-coded BEST_SEARCH_CANDIDATE.
    if SEARCH_OUT_JSON.exists():
        payload = json.loads(SEARCH_OUT_JSON.read_text(encoding="utf-8"))
        if payload.get("search_top"):
            best_candidate = HybridSwitchCandidate(**payload["search_top"][0]["candidate"])
        else:
            best_candidate = BEST_SEARCH_CANDIDATE
    else:
        best_candidate = BEST_SEARCH_CANDIDATE
    _, latest_bar = load_component_bundle(CACHE_PATH)
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_rows: list[dict[str, object]] = []
    specs = _period_specs(latest_bar)
    # Explicit "fork" start method (POSIX-only); workers re-load the bundle
    # from CACHE_PATH themselves, so only strings/dicts are submitted.
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(6, len(specs)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(
                _period_worker,
                CACHE_PATH,
                asdict(best_candidate),
                kind,
                label,
                str(start),
                str(end),
            ): (kind, label)
            for kind, label, start, end in specs
        }
        for future in as_completed(future_map):
            kind, label = future_map[future]
            kind_result, label_result, metrics, latest = future.result()
            if kind_result == "window":
                window_results[label_result] = metrics
            else:
                year_results[label_result] = metrics
            # Only the 2026_YTD worker returns non-empty latest rows.
            if latest:
                latest_rows = latest
            print(f"[done] {label_result}", flush=True)
    # Score on the complete windows and calendar years (YTD excluded).
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    payload = {
        "analysis": "current_relaxed_hybrid_exact",
        "latest_bar": str(latest_bar),
        "candidate": asdict(best_candidate),
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_rows": latest_rows,
        "baselines": _baseline_summary(),
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,376 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
from strategy32.research.soft_router import (
build_cash_overlay_period_components,
load_component_bundle,
score_candidate,
segment_metrics,
)
# Pickled market-data bundle shared by the experiment scripts.
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
# Cached core-filter comparison results used as the baseline reference.
BASELINE_PATH = Path("/tmp/strategy32_recent_core_filter_comparison.json")
OUT_JSON = Path("/tmp/strategy32_current_relaxed_hybrid_experiment.json")
# Rolling evaluation windows as (days, label).
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)
# Calendar-year periods as (label, start, exclusive end). 2021 starts
# mid-March — presumably the first available bar of the dataset; confirm.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
# Live overrides plus the currently-deployed hard-filter settings.
CURRENT_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}
# Same hard-filter settings, but with loosened momentum/universe gates.
RELAXED_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "momentum_min_score": 0.58,
    "momentum_min_relative_strength": -0.03,
    "momentum_min_7d_return": 0.00,
    "universe_min_avg_dollar_volume": 75_000_000.0,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}
@dataclass(frozen=True, slots=True)
class HybridSwitchCandidate:
positive_regimes: tuple[str, ...]
core_score_min: float
breadth_persist_min: float
funding_persist_min: float
panic_max: float
choppy_max: float
distribution_max: float
@property
def name(self) -> str:
regimes = ",".join(self.positive_regimes)
return (
f"regimes:{regimes}"
f"|core>={self.core_score_min:.2f}"
f"|breadth>={self.breadth_persist_min:.2f}"
f"|funding>={self.funding_persist_min:.2f}"
f"|panic<={self.panic_max:.2f}"
f"|choppy<={self.choppy_max:.2f}"
f"|dist<={self.distribution_max:.2f}"
)
def _clip01(value: float) -> float:
return min(max(float(value), 0.0), 1.0)
def _overlay_weights(candidate, score_row: dict[str, float], core_cash_pct: float) -> tuple[float, float, float]:
core_score = float(score_row.get("core_score", 0.0))
panic_score = float(score_row.get("panic_score", 0.0))
choppy_score = float(score_row.get("choppy_score", 0.0))
distribution_score = float(score_row.get("distribution_score", 0.0))
cap_signal = _clip01((panic_score - candidate.cap_threshold) / max(1.0 - candidate.cap_threshold, 1e-9))
chop_signal = _clip01((choppy_score - candidate.chop_threshold) / max(1.0 - candidate.chop_threshold, 1e-9))
dist_signal = _clip01((distribution_score - candidate.dist_threshold) / max(1.0 - candidate.dist_threshold, 1e-9))
if core_score > candidate.core_block_threshold:
chop_signal *= 0.25
dist_signal *= 0.35
cap_weight = float(core_cash_pct) * candidate.cap_cash_weight * cap_signal
chop_weight = float(core_cash_pct) * candidate.chop_cash_weight * chop_signal
dist_weight = float(core_cash_pct) * candidate.dist_cash_weight * dist_signal
overlay_total = cap_weight + chop_weight + dist_weight
if overlay_total > core_cash_pct and overlay_total > 0.0:
scale = float(core_cash_pct) / overlay_total
cap_weight *= scale
chop_weight *= scale
dist_weight *= scale
return cap_weight, chop_weight, dist_weight
def _pick_relaxed(score_row: dict[str, float], candidate: HybridSwitchCandidate) -> bool:
return (
str(score_row.get("strategic_regime")) in candidate.positive_regimes
and float(score_row.get("core_score", 0.0)) >= candidate.core_score_min
and float(score_row.get("breadth_persist", 0.0) or 0.0) >= candidate.breadth_persist_min
and float(score_row.get("funding_persist", 0.0) or 0.0) >= candidate.funding_persist_min
and float(score_row.get("panic_score", 0.0)) <= candidate.panic_max
and float(score_row.get("choppy_score", 0.0)) <= candidate.choppy_max
and float(score_row.get("distribution_score", 0.0)) <= candidate.distribution_max
)
def _compose_hybrid_curve(
    *,
    current_components: dict[str, object],
    relaxed_components: dict[str, object],
    switch_candidate: HybridSwitchCandidate,
) -> tuple[pd.Series, pd.DataFrame]:
    """Build one equity curve that switches between the two core variants.

    For every bar, the previous bar's regime scores decide (via
    ``switch_candidate``) whether the relaxed or the current core supplies
    the next bar's return and cash percentage; the cap/chop/dist overlay
    returns are then layered on top, funded from the active core's cash.

    Returns the equity curve (seeded at 1000.0) and a per-bar diagnostics
    DataFrame.
    """
    timestamps = list(current_components["timestamps"])
    # Timestamp-indexed lookups for scores and each core's cash percentage.
    score_map = current_components["score_frame"].set_index("timestamp").sort_index()
    current_cash_map = current_components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
    relaxed_cash_map = relaxed_components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
    current_core_returns = current_components["core_returns"]
    relaxed_core_returns = relaxed_components["core_returns"]
    # Overlay return streams always come from the "current" component set.
    cap_returns = current_components["cap_returns"]
    chop_returns = current_components["chop_returns"]
    dist_returns = current_components["dist_returns"]
    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    rows: list[dict[str, object]] = []
    for i in range(1, len(timestamps)):
        # Decide bar i using only bar i-1's scores (no lookahead).
        signal_ts = pd.Timestamp(timestamps[i - 1])
        execution_ts = pd.Timestamp(timestamps[i])
        score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
        use_relaxed = _pick_relaxed(score_row, switch_candidate)
        active_name = "relaxed_overheat" if use_relaxed else "current_overheat"
        core_returns = relaxed_core_returns if use_relaxed else current_core_returns
        cash_map = relaxed_cash_map if use_relaxed else current_cash_map
        # Fall back to the last known cash level (or fully cash) when the
        # signal timestamp is missing from the exposure frame.
        core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
        cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, core_cash_pct)
        bar_ret = (
            float(core_returns.get(execution_ts, 0.0))
            + cap_weight * float(cap_returns.get(execution_ts, 0.0))
            + chop_weight * float(chop_returns.get(execution_ts, 0.0))
            + dist_weight * float(dist_returns.get(execution_ts, 0.0))
        )
        # Equity is floored at zero (no negative account values).
        equity *= max(0.0, 1.0 + bar_ret)
        idx.append(execution_ts)
        vals.append(equity)
        rows.append(
            {
                "timestamp": execution_ts,
                "active_core": active_name,
                "core_cash_pct": core_cash_pct,
                "core_score": float(score_row.get("core_score", 0.0)),
                "panic_score": float(score_row.get("panic_score", 0.0)),
                "choppy_score": float(score_row.get("choppy_score", 0.0)),
                "distribution_score": float(score_row.get("distribution_score", 0.0)),
                "breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
                "funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
                "portfolio_return": bar_ret,
            }
        )
    curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
    return curve, pd.DataFrame(rows)
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Window and yearly metrics plus the composite score for one curve.

    Returns (windows, years, score, negative_years, mdd_violations); the
    YTD entry is included in ``years`` but excluded from scoring.
    """
    windows = {
        label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    }
    years = {
        label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    }
    years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    score, negative_years, mdd_violations = score_candidate(
        {label: windows[label] for _, label in WINDOWS},
        {label: years[label] for label, _, _ in YEAR_PERIODS},
    )
    return windows, years, score, negative_years, mdd_violations
def _candidate_space() -> list[HybridSwitchCandidate]:
    """Enumerate the full grid of switch-threshold candidates.

    Uses ``itertools.product`` instead of seven nested loops; the iteration
    order (last axis varies fastest) matches the original nesting, so the
    returned candidate ordering is unchanged.
    """
    positive_sets = (
        ("EUPHORIC_BREAKOUT",),
        ("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
    )
    grid = itertools.product(
        positive_sets,
        (0.50, 0.55, 0.60),   # core_score_min
        (0.50, 0.55, 0.60),   # breadth_persist_min
        (0.55, 0.60, 0.65),   # funding_persist_min
        (0.20, 0.30),         # panic_max
        (0.40, 0.50),         # choppy_max
        (0.30, 0.40),         # distribution_max
    )
    return [
        HybridSwitchCandidate(
            positive_regimes=positive_regimes,
            core_score_min=core_score_min,
            breadth_persist_min=breadth_persist_min,
            funding_persist_min=funding_persist_min,
            panic_max=panic_max,
            choppy_max=choppy_max,
            distribution_max=distribution_max,
        )
        for (
            positive_regimes,
            core_score_min,
            breadth_persist_min,
            funding_persist_min,
            panic_max,
            choppy_max,
            distribution_max,
        ) in grid
    ]
def _baseline_summary() -> dict[str, object]:
    """Load the saved per-core baseline results for the comparison payload."""
    variants = json.loads(BASELINE_PATH.read_text(encoding="utf-8"))["variants"]
    return {name: variants[name]["results"] for name in ("current_overheat", "relaxed_overheat")}
def _evaluate_exact_candidate(bundle, latest_bar: pd.Timestamp, switch_candidate: HybridSwitchCandidate) -> dict[str, object]:
    """Re-evaluate one switch candidate by rebuilding components per period.

    Unlike the fast search (which composes and slices a single 5y curve),
    this rebuilds the current and relaxed component sets for every trailing
    window and calendar year, composes the hybrid curve inside each period,
    and scores the per-period metrics.
    """
    windows: dict[str, dict[str, float]] = {}
    years: dict[str, dict[str, float]] = {}
    latest_rows: list[dict[str, object]] = []
    # (kind, label, start, end) for every trailing window and calendar year.
    periods = [
        *(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar) for days, label in WINDOWS),
        *(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))) for label, start, end_exclusive in YEAR_PERIODS),
        ("year", "2026_YTD", YTD_START, latest_bar),
    ]
    for kind, label, start, end in periods:
        current = build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
        )
        relaxed = build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
        )
        curve, rows = _compose_hybrid_curve(
            current_components=current,
            relaxed_components=relaxed,
            switch_candidate=switch_candidate,
        )
        metrics = segment_metrics(curve, start, end)
        if kind == "window":
            windows[label] = metrics
        else:
            years[label] = metrics
            if label == "2026_YTD":
                # Keep the last few bars as a sanity check on recent behavior.
                latest_rows = rows.tail(3).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")
    score, negative_years, mdd_violations = score_candidate(
        {label: windows[label] for _, label in WINDOWS},
        {label: years[label] for label, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(switch_candidate),
        "name": switch_candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": windows,
        "years": years,
        "latest_rows": latest_rows,
    }
def main() -> None:
    """Run the current-vs-relaxed hybrid switch experiment end to end.

    Builds both component sets once over the full 5y span, grid-searches
    the switch candidates on the composed curve, re-evaluates the best one
    exactly (per-period component rebuilds), and writes everything to
    OUT_JSON.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    eval_start = latest_bar - pd.Timedelta(days=1825)
    print("[phase] build current components", flush=True)
    current_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
    )
    print("[phase] build relaxed components", flush=True)
    relaxed_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
    )
    search_rows: list[dict[str, object]] = []
    candidates = _candidate_space()
    print("[phase] search switch candidates", flush=True)
    for idx, candidate in enumerate(candidates, start=1):
        curve, rows = _compose_hybrid_curve(
            current_components=current_components,
            relaxed_components=relaxed_components,
            switch_candidate=candidate,
        )
        windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
        # Fraction of bars on which the relaxed core was active.
        relaxed_share = float((rows["active_core"] == "relaxed_overheat").mean()) if not rows.empty else 0.0
        search_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "relaxed_share": relaxed_share,
                "windows": windows,
                "years": years,
            }
        )
        if idx % 36 == 0 or idx == len(candidates):
            print(f"[search] {idx}/{len(candidates)}", flush=True)
    # Highest score first; the winner gets the exact per-period re-run.
    search_rows.sort(key=lambda row: float(row["score"]), reverse=True)
    best_search = search_rows[0]
    print(f"[phase] exact best {best_search['name']}", flush=True)
    best_exact = _evaluate_exact_candidate(
        bundle,
        latest_bar,
        HybridSwitchCandidate(**best_search["candidate"]),
    )
    payload = {
        "analysis": "current_relaxed_hybrid_experiment",
        "latest_bar": str(latest_bar),
        "candidate": asdict(BEST_CASH_OVERLAY),
        "baselines": _baseline_summary(),
        "search_top": search_rows[:5],
        "best_exact": best_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,164 @@
from __future__ import annotations
import copy
import itertools
import json
import sys
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V5_BASELINE, build_strategy32_config
from strategy32.data import build_strategy32_market_bundle
# Trailing evaluation windows as (lookback days, label).
WINDOWS = [(30, "1m"), (365, "1y"), (1095, "3y"), (1825, "5y")]

# Optional v6 feature toggles as (label, config attribute, value to set).
FEATURES: list[tuple[str, str, bool]] = [
    ("no_sideways", "enable_sideways_engine", False),
    ("strong_kill_switch", "enable_strong_kill_switch", True),
    ("daily_trend_filter", "enable_daily_trend_filter", True),
    ("expanded_hedge", "enable_expanded_hedge", True),
    ("max_holding_exit", "enable_max_holding_exit", True),
]
def variant_name(enabled: list[str]) -> str:
    """Human-readable variant label; the empty feature set is the baseline."""
    if enabled:
        return "+".join(enabled)
    return "baseline_v5"
def balanced_score(results: dict[str, dict[str, float | int | str]]) -> float:
score = 0.0
for label, weight in (("1y", 1.0), ("3y", 1.0), ("5y", 1.2)):
annualized = float(results[label]["annualized_return"])
drawdown = abs(float(results[label]["max_drawdown"]))
score += weight * (annualized / max(drawdown, 0.01))
score += 0.25 * float(results["1m"]["total_return"])
return score
def build_variants() -> list[tuple[str, dict[str, bool], list[str]]]:
    """Baseline plus every non-empty combination of the optional features.

    Returns (variant name, config overrides, enabled feature labels)
    triples in the same order as the original index-based enumeration.

    Fixes: drops the unused ``feature_names`` local and iterates
    ``itertools.combinations`` over FEATURES directly instead of over
    index tuples.
    """
    variants: list[tuple[str, dict[str, bool], list[str]]] = [("baseline_v5", {}, [])]
    for r in range(1, len(FEATURES) + 1):
        for combo in itertools.combinations(FEATURES, r):
            overrides: dict[str, bool] = {attr: value for _, attr, value in combo}
            enabled = [label for label, _, _ in combo]
            variants.append((variant_name(enabled), overrides, enabled))
    return variants
def main() -> None:
    """Exhaustively backtest every v6 feature combination over all windows.

    Fetches one shared market bundle, then for each variant runs the
    1m/1y/3y/5y windows, computes a balanced score, and writes a ranked
    summary to /tmp/strategy32_v6_exhaustive_combos.json.
    """
    base = build_strategy32_config(PROFILE_V5_BASELINE)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Enough history for the longest window plus warmup and a safety margin.
    start = end - pd.Timedelta(days=max(days for days, _ in WINDOWS) + base.warmup_days + 14)
    print("fetching bundle...")
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=base.auto_discover_symbols,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=base.discovery_min_quote_volume_24h,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    print("latest", latest_completed_bar)
    results: dict[str, dict[str, dict[str, float | int | str]]] = {}
    summary_rows: list[dict[str, float | int | str | list[str]]] = []
    for idx, (name, overrides, enabled) in enumerate(build_variants(), start=1):
        # Fresh config per variant so toggles never leak across runs.
        cfg = copy.deepcopy(base)
        for attr, value in overrides.items():
            setattr(cfg, attr, value)
        variant_results = {}
        print(f"\n[{idx:02d}/32] {name}")
        for days, label in WINDOWS:
            eval_end = latest_completed_bar
            eval_start = eval_end - pd.Timedelta(days=days)
            # Include warmup history before the scored window.
            raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
            sliced = slice_bundle(bundle, raw_start, eval_end)
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            result = backtester.run()
            metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
            metrics["engine_pnl"] = result.engine_pnl
            metrics["total_trades"] = result.total_trades
            variant_results[label] = metrics
            print(
                label,
                "ret",
                round(float(metrics["total_return"]) * 100, 2),
                "mdd",
                round(float(metrics["max_drawdown"]) * 100, 2),
                "sharpe",
                round(float(metrics["sharpe"]), 2),
                "trades",
                metrics["trade_count"],
            )
        score = balanced_score(variant_results)
        results[name] = variant_results
        summary_rows.append(
            {
                "name": name,
                "enabled": enabled,
                "balanced_score": score,
                "ret_1m": float(variant_results["1m"]["total_return"]),
                "ret_1y": float(variant_results["1y"]["total_return"]),
                "ret_3y": float(variant_results["3y"]["total_return"]),
                "ret_5y": float(variant_results["5y"]["total_return"]),
                "mdd_1y": float(variant_results["1y"]["max_drawdown"]),
                "mdd_3y": float(variant_results["3y"]["max_drawdown"]),
                "mdd_5y": float(variant_results["5y"]["max_drawdown"]),
            }
        )
    # Rank all variants by the balanced score, best first.
    summary_rows.sort(key=lambda row: float(row["balanced_score"]), reverse=True)
    payload = {
        "strategy": "strategy32",
        "analysis": "v6_exhaustive_combo",
        "initial_capital": 1000.0,
        "auto_discover_symbols": base.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "requested_symbols": [] if base.auto_discover_symbols else base.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": base.timeframe,
        "results": results,
        "summary": summary_rows,
    }
    out = Path("/tmp/strategy32_v6_exhaustive_combos.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nTop 10 by balanced score")
    for row in summary_rows[:10]:
        print(
            row["name"],
            "score",
            round(float(row["balanced_score"]), 3),
            "1y",
            round(float(row["ret_1y"]) * 100, 2),
            "3y",
            round(float(row["ret_3y"]) * 100, 2),
            "5y",
            round(float(row["ret_5y"]) * 100, 2),
            "mdd5y",
            round(float(row["mdd_5y"]) * 100, 2),
        )
    print("\nwrote", out)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,283 @@
from __future__ import annotations
import copy
import itertools
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
from strategy29.common.models import MarketDataBundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, Strategy32Config, build_strategy32_config
from strategy32.data import build_strategy32_market_bundle
# Coarse and fine passes evaluate 1y/3y only; the final pass adds 5y.
COARSE_WINDOWS = [(365, "1y"), (1095, "3y")]
FINAL_WINDOWS = [(365, "1y"), (1095, "3y"), (1825, "5y")]

# Coarse grid over liquidity and momentum gates.
COARSE_LIQUIDITY_FLOORS = [5_000_000.0, 10_000_000.0, 20_000_000.0, 50_000_000.0]
COARSE_MOMENTUM_SCORES = [0.55, 0.60, 0.65]
COARSE_RELATIVE_STRENGTH = [0.00, 0.02]
COARSE_7D_RETURNS = [0.00, 0.02]

# Fine pass varies correlation cap and carry edge on the coarse winners.
FINE_CORRELATION_CAPS = [0.70, 0.78]
FINE_CARRY_MIN_EDGE = [0.0, 0.002]

# How many candidates survive each stage.
TOP_COARSE_FOR_FINE = 8
TOP_FINE_FOR_FINAL = 5
@dataclass(slots=True)
class FilterVariant:
    """One point in the universe/momentum filter grid.

    ``name`` encodes the parameter values for log lines and JSON output.
    """

    name: str
    liquidity_floor: float
    momentum_min_score: float
    momentum_min_relative_strength: float
    momentum_min_7d_return: float
    max_pairwise_correlation: float
    carry_min_expected_edge: float
# Shared state for the worker processes: with the fork start method the
# children inherit these instead of pickling the bundle for every task.
GLOBAL_BUNDLE: MarketDataBundle | None = None
GLOBAL_LATEST_BAR: pd.Timestamp | None = None
def _subset_bundle(bundle: MarketDataBundle, symbols: set[str]) -> MarketDataBundle:
    """Restrict a market bundle (prices and funding) to the given symbols."""
    prices = {sym: frame for sym, frame in bundle.prices.items() if sym in symbols}
    funding = {sym: frame for sym, frame in bundle.funding.items() if sym in symbols}
    return MarketDataBundle(prices=prices, funding=funding)
def _score_results(results: dict[str, dict[str, float | int | str]], include_5y: bool) -> float:
score = 0.0
ret_1y = float(results["1y"]["total_return"])
ann_1y = float(results["1y"]["annualized_return"])
mdd_1y = abs(float(results["1y"]["max_drawdown"]))
ret_3y = float(results["3y"]["total_return"])
ann_3y = float(results["3y"]["annualized_return"])
mdd_3y = abs(float(results["3y"]["max_drawdown"]))
score += 1.8 * (ann_1y / max(mdd_1y, 0.01))
score += 1.0 * (ann_3y / max(mdd_3y, 0.01))
score += 0.25 * ret_1y + 0.15 * ret_3y
if ret_1y <= 0:
score -= 6.0
if float(results["1y"]["max_drawdown"]) < -0.25:
score -= 1.0
if include_5y:
ret_5y = float(results["5y"]["total_return"])
ann_5y = float(results["5y"]["annualized_return"])
mdd_5y = abs(float(results["5y"]["max_drawdown"]))
score += 0.8 * (ann_5y / max(mdd_5y, 0.01))
score += 0.10 * ret_5y
if float(results["5y"]["max_drawdown"]) < -0.30:
score -= 1.0
return score
def _evaluate_variant(variant: FilterVariant, windows: list[tuple[int, str]]) -> dict[str, object]:
    """Backtest one filter variant over the given windows and score it.

    Runs inside a worker process; GLOBAL_BUNDLE / GLOBAL_LATEST_BAR must be
    populated (inherited via fork) before submission.
    """
    if GLOBAL_BUNDLE is None or GLOBAL_LATEST_BAR is None:
        raise RuntimeError("global bundle not initialized")
    # Apply the variant's filter settings on top of the v7 default profile.
    # NOTE: the same liquidity floor feeds both the discovery quote-volume
    # gate and the avg-dollar-volume gate in this (coarse) search.
    cfg: Strategy32Config = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.liquidity_floor
    cfg.momentum_min_score = variant.momentum_min_score
    cfg.momentum_min_relative_strength = variant.momentum_min_relative_strength
    cfg.momentum_min_7d_return = variant.momentum_min_7d_return
    cfg.max_pairwise_correlation = variant.max_pairwise_correlation
    cfg.carry_min_expected_edge = variant.carry_min_expected_edge
    bundle = GLOBAL_BUNDLE
    results: dict[str, dict[str, float | int | str]] = {}
    for days, label in windows:
        eval_end = GLOBAL_LATEST_BAR
        eval_start = eval_end - pd.Timedelta(days=days)
        # Include warmup history before the scored window.
        raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
        backtester.engine_config.initial_capital = 1000.0
        result = backtester.run()
        metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
        metrics["engine_pnl"] = result.engine_pnl
        metrics["total_trades"] = result.total_trades
        metrics["universe_size"] = len(bundle.prices)
        results[label] = metrics
    score = _score_results(results, include_5y=any(label == "5y" for _, label in windows))
    return {
        "variant": asdict(variant),
        "score": score,
        "results": results,
    }
def _run_parallel(variants: list[FilterVariant], windows: list[tuple[int, str]], workers: int = 6) -> list[dict[str, object]]:
    """Evaluate all variants in parallel; return rows sorted by score (desc).

    Uses the fork start method so workers inherit GLOBAL_BUNDLE instead of
    pickling it per task; progress is printed as futures complete.
    """
    ctx = mp.get_context("fork")
    rows: list[dict[str, object]] = []
    with ProcessPoolExecutor(max_workers=workers, mp_context=ctx) as executor:
        future_map = {executor.submit(_evaluate_variant, variant, windows): variant for variant in variants}
        total = len(future_map)
        done = 0
        for future in as_completed(future_map):
            row = future.result()
            rows.append(row)
            done += 1
            variant = row["variant"]
            results = row["results"]
            print(
                f"[{done:02d}/{total}] {variant['name']} score={row['score']:.3f} "
                f"1y={float(results['1y']['total_return'])*100:.2f}% "
                f"3y={float(results['3y']['total_return'])*100:.2f}%",
                flush=True,
            )
    rows.sort(key=lambda row: float(row["score"]), reverse=True)
    return rows
def _build_coarse_variants() -> list[FilterVariant]:
    """Full grid of liquidity/momentum gates with fixed correlation and carry."""
    combos = itertools.product(
        COARSE_LIQUIDITY_FLOORS,
        COARSE_MOMENTUM_SCORES,
        COARSE_RELATIVE_STRENGTH,
        COARSE_7D_RETURNS,
    )
    return [
        FilterVariant(
            name=f"liq{int(floor/1_000_000)}m_s{score:.2f}_rs{rs:.2f}_r7{ret7:.2f}",
            liquidity_floor=floor,
            momentum_min_score=score,
            momentum_min_relative_strength=rs,
            momentum_min_7d_return=ret7,
            max_pairwise_correlation=0.78,
            carry_min_expected_edge=0.0,
        )
        for floor, score, rs, ret7 in combos
    ]
def _build_fine_variants(top_rows: list[dict[str, object]]) -> list[FilterVariant]:
    """Expand the top coarse rows with correlation-cap / carry-edge variations.

    Deduplicates on the full parameter tuple so repeated coarse settings do
    not produce duplicate fine variants.
    """
    variants: list[FilterVariant] = []
    seen: set[tuple] = set()
    for row in top_rows[:TOP_COARSE_FOR_FINE]:
        base = row["variant"]
        for corr_cap, carry_edge in itertools.product(FINE_CORRELATION_CAPS, FINE_CARRY_MIN_EDGE):
            key = (
                base["liquidity_floor"],
                base["momentum_min_score"],
                base["momentum_min_relative_strength"],
                base["momentum_min_7d_return"],
                corr_cap,
                carry_edge,
            )
            if key in seen:
                continue
            seen.add(key)
            variants.append(
                FilterVariant(
                    name=(
                        f"liq{int(base['liquidity_floor']/1_000_000)}m_"
                        f"s{base['momentum_min_score']:.2f}_"
                        f"rs{base['momentum_min_relative_strength']:.2f}_"
                        f"r7{base['momentum_min_7d_return']:.2f}_"
                        f"corr{corr_cap:.2f}_carry{carry_edge:.3f}"
                    ),
                    liquidity_floor=float(base["liquidity_floor"]),
                    momentum_min_score=float(base["momentum_min_score"]),
                    momentum_min_relative_strength=float(base["momentum_min_relative_strength"]),
                    momentum_min_7d_return=float(base["momentum_min_7d_return"]),
                    max_pairwise_correlation=float(corr_cap),
                    carry_min_expected_edge=float(carry_edge),
                )
            )
    return variants
def main() -> None:
    """Three-stage filter search: coarse grid, fine refinement, 5y validation.

    Stages 1-2 run on a 3y bundle for speed; the surviving variants are
    re-validated on a 5y bundle, and all stage results are written to
    /tmp/strategy32_filter_search.json.
    """
    global GLOBAL_BUNDLE, GLOBAL_LATEST_BAR
    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    max_days = max(days for days, _ in FINAL_WINDOWS)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # History spans include warmup and a safety margin.
    start_5y = end - pd.Timedelta(days=max_days + base.warmup_days + 14)
    start_3y = end - pd.Timedelta(days=max(days for days, _ in COARSE_WINDOWS) + base.warmup_days + 14)
    # Fetch at the loosest liquidity floor; each variant tightens it per run.
    lowest_floor = min(COARSE_LIQUIDITY_FLOORS)
    print("fetching 3y bundle for coarse search...")
    GLOBAL_BUNDLE, GLOBAL_LATEST_BAR, accepted_symbols_3y, rejected_symbols_3y, quote_by_symbol_3y = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=lowest_floor,
        start=start_3y,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    coarse_rows = _run_parallel(_build_coarse_variants(), COARSE_WINDOWS)
    fine_rows = _run_parallel(_build_fine_variants(coarse_rows), COARSE_WINDOWS)
    print("fetching 5y bundle for final validation...")
    GLOBAL_BUNDLE, GLOBAL_LATEST_BAR, accepted_symbols_5y, rejected_symbols_5y, quote_by_symbol_5y = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=lowest_floor,
        start=start_5y,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    final_variants = [FilterVariant(**row["variant"]) for row in fine_rows[:TOP_FINE_FOR_FINAL]]
    final_rows = _run_parallel(final_variants, FINAL_WINDOWS)
    payload = {
        "strategy": "strategy32",
        "analysis": "wide_universe_filter_search",
        "profile": PROFILE_V7_DEFAULT,
        "initial_capital": 1000.0,
        "latest_completed_bar": str(GLOBAL_LATEST_BAR),
        "accepted_symbols_3y": accepted_symbols_3y,
        "rejected_symbols_3y": rejected_symbols_3y,
        "quote_by_symbol_3y": quote_by_symbol_3y,
        "accepted_symbols_5y": accepted_symbols_5y,
        "rejected_symbols_5y": rejected_symbols_5y,
        "quote_by_symbol_5y": quote_by_symbol_5y,
        "coarse_top10": coarse_rows[:10],
        "fine_top10": fine_rows[:10],
        "final_ranked": final_rows,
    }
    out = Path("/tmp/strategy32_filter_search.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nTop final variants")
    for row in final_rows:
        metrics = row["results"]
        print(
            row["variant"]["name"],
            "score",
            round(float(row["score"]), 3),
            "1y",
            round(float(metrics["1y"]["total_return"]) * 100, 2),
            "3y",
            round(float(metrics["3y"]["total_return"]) * 100, 2),
            "5y",
            round(float(metrics["5y"]["total_return"]) * 100, 2),
            "mdd5y",
            round(float(metrics["5y"]["max_drawdown"]) * 100, 2),
        )
    print("\nwrote", out)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,447 @@
from __future__ import annotations
import copy
import itertools
import json
import multiprocessing as mp
import random
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
from strategy29.common.models import MarketDataBundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, Strategy32Config, build_strategy32_config
from strategy32.data import build_strategy32_market_bundle
# Final validation windows as (lookback days, label).
FINAL_WINDOWS = [(365, "1y"), (1095, "3y"), (1825, "5y")]

# Staged-search sizing knobs.
# NOTE(review): presumably a broad random sample, then a local sample around
# the best broad rows, then exact evaluation of the finalists — the sampling
# code is further down this file; confirm there.
RANDOM_SEED = 32
RANDOM_SAMPLE_SIZE = 96
TOP_BROAD_FOR_LOCAL = 10
LOCAL_SAMPLE_SIZE = 192
TOP_FINAL_FOR_EXACT = 8

# Parameter pools the search draws from.
LIQUIDITY_FLOORS = [20_000_000.0, 30_000_000.0, 40_000_000.0, 50_000_000.0, 60_000_000.0, 75_000_000.0, 100_000_000.0]
AVG_DOLLAR_VOLUME_MULTIPLIERS = [0.50, 0.75, 1.00]
MOMENTUM_MIN_SCORES = [0.50, 0.55, 0.60, 0.65, 0.70]
MOMENTUM_MIN_RS = [-0.02, 0.00, 0.02]
MOMENTUM_MIN_7D = [-0.02, 0.00, 0.02]
CORRELATION_CAPS = [0.65, 0.70, 0.74, 0.78, 0.82]
CARRY_MIN_EDGES = [0.0, 0.001, 0.002]
@dataclass(slots=True)
class ExtendedFilterVariant:
    """One sampled filter configuration for the extended search.

    Unlike the coarse ``FilterVariant`` grid, the avg-dollar-volume floor
    here is decoupled from the 24h quote-volume (discovery) floor.
    """

    name: str
    liquidity_floor: float
    avg_dollar_volume_floor: float
    momentum_min_score: float
    momentum_min_relative_strength: float
    momentum_min_7d_return: float
    max_pairwise_correlation: float
    carry_min_expected_edge: float
# Shared state for the worker processes; populated in the parent so forked
# children inherit it instead of pickling the bundle for every task.
GLOBAL_BUNDLE: MarketDataBundle | None = None
GLOBAL_LATEST_BAR: pd.Timestamp | None = None
def _subset_bundle(bundle: MarketDataBundle, symbols: set[str]) -> MarketDataBundle:
    """Return a copy of *bundle* keeping only the requested symbols."""

    def keep(table):
        # Filter a symbol->DataFrame mapping down to the requested symbols.
        return {sym: frame for sym, frame in table.items() if sym in symbols}

    return MarketDataBundle(prices=keep(bundle.prices), funding=keep(bundle.funding))
def _window_return(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> float | None:
window = curve.loc[(curve.index >= start) & (curve.index <= end)]
if len(window) < 2:
return None
start_equity = float(window.iloc[0])
end_equity = float(window.iloc[-1])
if start_equity <= 0:
return None
return end_equity / start_equity - 1.0
def _rolling_returns(curve: pd.Series, *, window_days: int, step_days: int) -> list[float]:
    """All rolling ``window_days`` returns over *curve*, stepped by ``step_days``.

    Windows with insufficient data (None from ``_window_return``) are skipped;
    an empty curve yields an empty list.
    """
    if curve.empty:
        return []
    last_ts = curve.index[-1]
    window_span = pd.Timedelta(days=window_days)
    step_span = pd.Timedelta(days=step_days)
    out: list[float] = []
    cursor = curve.index[0] + window_span
    while cursor <= last_ts:
        value = _window_return(curve, cursor - window_span, cursor)
        if value is not None:
            out.append(value)
        cursor = cursor + step_span
    return out
def _summarize_rolling(curve: pd.Series) -> dict[str, float]:
    """Count / positive-ratio / median / worst stats for 12m and 24m rolling returns.

    Empty rolling lists yield 0.0 for every statistic.
    """

    def stats(prefix: str, window_days: int) -> dict[str, float]:
        rets = _rolling_returns(curve, window_days=window_days, step_days=30)
        if not rets:
            return {
                prefix + "_count": 0.0,
                prefix + "_positive_ratio": 0.0,
                prefix + "_median": 0.0,
                prefix + "_worst": 0.0,
            }
        return {
            prefix + "_count": float(len(rets)),
            prefix + "_positive_ratio": float(sum(ret > 0 for ret in rets) / len(rets)),
            prefix + "_median": float(pd.Series(rets).median()),
            prefix + "_worst": float(min(rets)),
        }

    return {**stats("rolling_12m", 365), **stats("rolling_24m", 730)}
def _score_variant(full_metrics: dict[str, float], rolling_metrics: dict[str, float]) -> float:
annualized_return = float(full_metrics["annualized_return"])
max_dd = abs(float(full_metrics["max_drawdown"]))
sharpe = float(full_metrics["sharpe"])
calmar = annualized_return / max(max_dd, 0.01)
score = 0.0
score += 2.0 * calmar
score += 0.6 * sharpe
score += 1.6 * rolling_metrics["rolling_12m_positive_ratio"]
score += 1.0 * rolling_metrics["rolling_24m_positive_ratio"]
score += 3.0 * rolling_metrics["rolling_12m_median"]
score += 2.2 * rolling_metrics["rolling_24m_median"]
score += 1.8 * rolling_metrics["rolling_12m_worst"]
score += 1.0 * rolling_metrics["rolling_24m_worst"]
score += 0.25 * float(full_metrics["total_return"])
if rolling_metrics["rolling_12m_positive_ratio"] < 0.55:
score -= 0.8
if rolling_metrics["rolling_12m_worst"] < -0.18:
score -= 1.2
if float(full_metrics["max_drawdown"]) < -0.30:
score -= 1.0
if annualized_return < 0.08:
score -= 0.6
return score
def _evaluate_variant(variant: ExtendedFilterVariant) -> dict[str, object]:
    """Backtest *variant* over the trailing 1825-day window and score it.

    Relies on the module-level GLOBAL_BUNDLE / GLOBAL_LATEST_BAR being
    populated by main() before worker processes fork.
    """
    if GLOBAL_BUNDLE is None or GLOBAL_LATEST_BAR is None:
        raise RuntimeError("global bundle not initialized")
    # Overlay the variant's filter thresholds on top of the v7 profile defaults.
    cfg: Strategy32Config = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.avg_dollar_volume_floor
    cfg.momentum_min_score = variant.momentum_min_score
    cfg.momentum_min_relative_strength = variant.momentum_min_relative_strength
    cfg.momentum_min_7d_return = variant.momentum_min_7d_return
    cfg.max_pairwise_correlation = variant.max_pairwise_correlation
    cfg.carry_min_expected_edge = variant.carry_min_expected_edge
    bundle = GLOBAL_BUNDLE
    eval_end = GLOBAL_LATEST_BAR
    eval_start = eval_end - pd.Timedelta(days=1825)
    # Slice in extra pre-history (cfg.warmup_days) ahead of the scored window.
    raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
    sliced = slice_bundle(bundle, raw_start, eval_end)
    backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
    backtester.engine_config.initial_capital = 1000.0
    result = backtester.run()
    full_metrics = evaluate_window_result(
        result,
        eval_start=eval_start,
        bars_per_day=backtester.engine_config.bars_per_day,
    )
    # Rolling statistics are computed on the post-warmup equity only.
    curve = result.equity_curve.loc[result.equity_curve.index >= eval_start]
    rolling_metrics = _summarize_rolling(curve)
    score = _score_variant(full_metrics, rolling_metrics)
    return {
        "variant": asdict(variant),
        "score": score,
        "full_metrics": full_metrics,
        "rolling_metrics": rolling_metrics,
        "engine_pnl": result.engine_pnl,
        "total_trades": result.total_trades,
        "universe_size": len(bundle.prices),
    }
def _evaluate_exact_windows(variant: ExtendedFilterVariant) -> dict[str, object]:
    """Backtest *variant* separately for each FINAL_WINDOWS horizon.

    Each window gets its own slice and backtest run (rather than slicing one
    long equity curve), so per-window metrics are independent. Requires
    GLOBAL_BUNDLE / GLOBAL_LATEST_BAR to be initialized by main().
    """
    if GLOBAL_BUNDLE is None or GLOBAL_LATEST_BAR is None:
        raise RuntimeError("global bundle not initialized")
    # Same threshold overlay as _evaluate_variant.
    cfg: Strategy32Config = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.avg_dollar_volume_floor
    cfg.momentum_min_score = variant.momentum_min_score
    cfg.momentum_min_relative_strength = variant.momentum_min_relative_strength
    cfg.momentum_min_7d_return = variant.momentum_min_7d_return
    cfg.max_pairwise_correlation = variant.max_pairwise_correlation
    cfg.carry_min_expected_edge = variant.carry_min_expected_edge
    bundle = GLOBAL_BUNDLE
    results: dict[str, dict[str, float | int | str]] = {}
    for days, label in FINAL_WINDOWS:
        eval_end = GLOBAL_LATEST_BAR
        eval_start = eval_end - pd.Timedelta(days=days)
        raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
        backtester.engine_config.initial_capital = 1000.0
        result = backtester.run()
        metrics = evaluate_window_result(
            result,
            eval_start=eval_start,
            bars_per_day=backtester.engine_config.bars_per_day,
        )
        metrics["engine_pnl"] = result.engine_pnl
        metrics["total_trades"] = result.total_trades
        results[label] = metrics
    return {
        "variant": asdict(variant),
        "results": results,
    }
def _run_parallel(func, variants: list[ExtendedFilterVariant], workers: int = 6) -> list[dict[str, object]]:
    """Evaluate *variants* with *func* across forked worker processes.

    The fork start method lets workers inherit GLOBAL_BUNDLE without pickling.
    Progress is printed as futures complete: rows carrying "full_metrics" come
    from the scoring evaluator, otherwise the exact-window evaluator's
    per-window results are shown. Returns rows sorted by descending score
    (rows without a "score" key sort as 0.0).
    """
    ctx = mp.get_context("fork")
    rows: list[dict[str, object]] = []
    with ProcessPoolExecutor(max_workers=workers, mp_context=ctx) as executor:
        future_map = {executor.submit(func, variant): variant for variant in variants}
        total = len(future_map)
        done = 0
        for future in as_completed(future_map):
            row = future.result()
            rows.append(row)
            done += 1
            if "full_metrics" in row:
                print(
                    f"[{done:03d}/{total}] {row['variant']['name']} "
                    f"score={float(row['score']):.3f} "
                    f"5y={float(row['full_metrics']['total_return']) * 100:.2f}% "
                    f"mdd={float(row['full_metrics']['max_drawdown']) * 100:.2f}%",
                    flush=True,
                )
            else:
                metrics = row["results"]
                print(
                    f"[{done:02d}/{total}] exact {row['variant']['name']} "
                    f"1y={float(metrics['1y']['total_return']) * 100:.2f}% "
                    f"3y={float(metrics['3y']['total_return']) * 100:.2f}% "
                    f"5y={float(metrics['5y']['total_return']) * 100:.2f}%",
                    flush=True,
                )
    rows.sort(key=lambda row: float(row.get("score", 0.0)), reverse=True)
    return rows
def _all_combos() -> list[ExtendedFilterVariant]:
    """Enumerate the full cartesian grid of filter-parameter variants."""
    grid = itertools.product(
        LIQUIDITY_FLOORS,
        AVG_DOLLAR_VOLUME_MULTIPLIERS,
        MOMENTUM_MIN_SCORES,
        MOMENTUM_MIN_RS,
        MOMENTUM_MIN_7D,
        CORRELATION_CAPS,
        CARRY_MIN_EDGES,
    )
    combos: list[ExtendedFilterVariant] = []
    for floor, avg_mult, score, rs, ret7, corr, carry in grid:
        # The name encodes every parameter, so it doubles as a dedup key.
        label = (
            f"liq{int(floor/1_000_000)}m"
            f"_avg{avg_mult:.2f}"
            f"_s{score:.2f}"
            f"_rs{rs:.2f}"
            f"_r7{ret7:.2f}"
            f"_corr{corr:.2f}"
            f"_carry{carry:.3f}"
        )
        combos.append(
            ExtendedFilterVariant(
                name=label,
                liquidity_floor=floor,
                avg_dollar_volume_floor=floor * avg_mult,
                momentum_min_score=score,
                momentum_min_relative_strength=rs,
                momentum_min_7d_return=ret7,
                max_pairwise_correlation=corr,
                carry_min_expected_edge=carry,
            )
        )
    return combos
def _seed_variants() -> list[ExtendedFilterVariant]:
    """Hand-picked variants from earlier searches, always included in the sample."""
    floor = 50_000_000.0
    specs = [
        ("prev_balanced", 0.60, 0.70, 0.0),
        ("prev_profit", 0.65, 0.78, 0.0),
        ("prev_profit_carry", 0.65, 0.78, 0.002),
    ]
    return [
        ExtendedFilterVariant(seed_name, floor, floor, score, 0.00, 0.00, corr, carry)
        for seed_name, score, corr, carry in specs
    ]
def _build_random_sample(seed: int) -> list[ExtendedFilterVariant]:
    """Return the seed variants plus a deterministic random draw from the grid."""
    rng = random.Random(seed)
    combos = _all_combos()
    # Seeds go first; later sampled duplicates of a seed name are skipped.
    chosen: dict[str, ExtendedFilterVariant] = {v.name: v for v in _seed_variants()}
    seeded_names = set(chosen)
    for candidate in rng.sample(combos, k=min(RANDOM_SAMPLE_SIZE, len(combos))):
        if candidate.name not in seeded_names:
            chosen[candidate.name] = candidate
    return list(chosen.values())
def _neighbor_values(value: float, values: list[float]) -> list[float]:
idx = values.index(value)
start = max(0, idx - 1)
end = min(len(values), idx + 2)
return values[start:end]
def _build_local_variants(rows: list[dict[str, object]]) -> list[ExtendedFilterVariant]:
    """Generate neighborhood variants around the top broad-search rows.

    For each of the best TOP_BROAD_FOR_LOCAL rows, takes the cartesian product
    of each parameter's immediate grid neighbors, dedupes by name, and -- when
    the pool exceeds LOCAL_SAMPLE_SIZE -- returns a seeded random subsample.
    """
    seen: dict[str, ExtendedFilterVariant] = {}
    for row in rows[:TOP_BROAD_FOR_LOCAL]:
        base = row["variant"]
        floor_values = _neighbor_values(float(base["liquidity_floor"]), LIQUIDITY_FLOORS)
        # The multiplier is reconstructed from the stored product by division.
        avg_mult_values = _neighbor_values(float(base["avg_dollar_volume_floor"]) / float(base["liquidity_floor"]), AVG_DOLLAR_VOLUME_MULTIPLIERS)
        score_values = _neighbor_values(float(base["momentum_min_score"]), MOMENTUM_MIN_SCORES)
        rs_values = _neighbor_values(float(base["momentum_min_relative_strength"]), MOMENTUM_MIN_RS)
        ret7_values = _neighbor_values(float(base["momentum_min_7d_return"]), MOMENTUM_MIN_7D)
        corr_values = _neighbor_values(float(base["max_pairwise_correlation"]), CORRELATION_CAPS)
        carry_values = _neighbor_values(float(base["carry_min_expected_edge"]), CARRY_MIN_EDGES)
        for floor, avg_mult, score, rs, ret7, corr, carry in itertools.product(
            floor_values,
            avg_mult_values,
            score_values,
            rs_values,
            ret7_values,
            corr_values,
            carry_values,
        ):
            # Same naming scheme as _all_combos, so names dedupe across passes.
            name = (
                f"liq{int(floor/1_000_000)}m"
                f"_avg{avg_mult:.2f}"
                f"_s{score:.2f}"
                f"_rs{rs:.2f}"
                f"_r7{ret7:.2f}"
                f"_corr{corr:.2f}"
                f"_carry{carry:.3f}"
            )
            seen[name] = ExtendedFilterVariant(
                name=name,
                liquidity_floor=floor,
                avg_dollar_volume_floor=floor * avg_mult,
                momentum_min_score=score,
                momentum_min_relative_strength=rs,
                momentum_min_7d_return=ret7,
                max_pairwise_correlation=corr,
                carry_min_expected_edge=carry,
            )
    variants = list(seen.values())
    if len(variants) <= LOCAL_SAMPLE_SIZE:
        return variants
    # Deterministic subsample keeps the refinement pass bounded.
    rng = random.Random(RANDOM_SEED + 1)
    return rng.sample(variants, k=LOCAL_SAMPLE_SIZE)
def main() -> None:
    """Run the extended universe-filter search: broad sample, local refinement,
    then exact multi-window validation of the finalists, writing a JSON report.
    """
    global GLOBAL_BUNDLE, GLOBAL_LATEST_BAR
    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    max_days = max(days for days, _ in FINAL_WINDOWS)
    # Fetch at the loosest liquidity floor so every variant filters a superset.
    lowest_floor = min(LIQUIDITY_FLOORS)
    start = end - pd.Timedelta(days=max_days + base.warmup_days + 14)
    print(f"building unbiased discovery bundle with current-volume filter disabled")
    GLOBAL_BUNDLE, GLOBAL_LATEST_BAR, accepted, rejected, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=lowest_floor,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    # Phase 1: random broad search over the full grid.
    broad_variants = _build_random_sample(RANDOM_SEED)
    print(f"running broad search on {len(broad_variants)} variants")
    broad_rows = _run_parallel(_evaluate_variant, broad_variants)
    # Phase 2: local refinement around the broad winners (skip re-runs).
    local_variants = _build_local_variants(broad_rows)
    already = {row["variant"]["name"] for row in broad_rows}
    local_variants = [variant for variant in local_variants if variant.name not in already]
    print(f"running local refinement on {len(local_variants)} variants")
    local_rows = _run_parallel(_evaluate_variant, local_variants)
    merged: dict[str, dict[str, object]] = {}
    for row in broad_rows + local_rows:
        merged[row["variant"]["name"]] = row
    ranked = sorted(merged.values(), key=lambda row: float(row["score"]), reverse=True)
    # Phase 3: exact per-window validation for the top-ranked variants.
    exact_variants = [ExtendedFilterVariant(**row["variant"]) for row in ranked[:TOP_FINAL_FOR_EXACT]]
    print(f"running exact 1y/3y/5y validation on top {len(exact_variants)} variants")
    exact_rows = _run_parallel(_evaluate_exact_windows, exact_variants)
    exact_by_name = {row["variant"]["name"]: row for row in exact_rows}
    final_ranked: list[dict[str, object]] = []
    for row in ranked[:TOP_FINAL_FOR_EXACT]:
        name = row["variant"]["name"]
        final_ranked.append(
            {
                **row,
                "exact_windows": exact_by_name[name]["results"],
            }
        )
    payload = {
        "strategy": "strategy32",
        "analysis": "wide_universe_filter_search_extended",
        "profile": PROFILE_V7_DEFAULT,
        "initial_capital": 1000.0,
        "latest_completed_bar": str(GLOBAL_LATEST_BAR),
        "accepted_symbols": accepted,
        "rejected_symbols": rejected,
        "quote_by_symbol": quote_by_symbol,
        "broad_variants": len(broad_rows),
        "local_variants": len(local_rows),
        "ranked_top20": ranked[:20],
        "final_ranked": final_ranked,
    }
    out = Path("/tmp/strategy32_filter_search_extended.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nTop exact variants")
    for row in final_ranked:
        metrics = row["exact_windows"]
        print(
            row["variant"]["name"],
            "score",
            round(float(row["score"]), 3),
            "1y",
            round(float(metrics["1y"]["total_return"]) * 100, 2),
            "3y",
            round(float(metrics["3y"]["total_return"]) * 100, 2),
            "5y",
            round(float(metrics["5y"]["total_return"]) * 100, 2),
            "mdd5y",
            round(float(metrics["5y"]["max_drawdown"]) * 100, 2),
        )
    print("\nwrote", out)
if __name__ == "__main__":  # Allow running the research script directly.
    main()

View File

@@ -0,0 +1,30 @@
from __future__ import annotations
import json
import sys
from pathlib import Path
# Make the repository root importable when this file runs as a standalone script.
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
    sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.research.hybrid_regime import run_hybrid_backtest
def main() -> None:
    """Run the hybrid-regime backtest and dump its metrics to a JSON report."""
    payload = run_hybrid_backtest()
    report_path = Path("/tmp/strategy32_hybrid_regime_backtest.json")
    report_path.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    # Echo the headline numbers for each labelled result set.
    for label, metrics in payload["results"].items():
        ret_pct = float(metrics["total_return"]) * 100
        ann_pct = float(metrics["annualized_return"]) * 100
        mdd_pct = float(metrics["max_drawdown"]) * 100
        print(
            label,
            f"ret={ret_pct:.2f}%",
            f"ann={ann_pct:.2f}%",
            f"sharpe={float(metrics['sharpe']):.2f}",
            f"mdd={mdd_pct:.2f}%",
        )
    print(f"wrote {report_path}")
if __name__ == "__main__":  # Allow running the research script directly.
    main()

View File

@@ -0,0 +1,458 @@
from __future__ import annotations
import itertools
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict, dataclass
from pathlib import Path
from statistics import median
import pandas as pd
# Make the repository root importable when this file runs as a standalone script.
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
    sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
from strategy29.backtest.window_analysis import slice_bundle
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
from strategy32.research.hybrid_regime import (
_build_positive_filter_plan,
_curve_returns,
_run_adverse_component_curve,
_run_static_component_curve,
load_fixed66_bundle,
)
from strategy32.scripts.run_regime_filter_analysis import STRATEGIC_REGIME_PROFILES, build_strategic_regime_frame
# Destination of the search report.
OUT_JSON = Path("/tmp/strategy32_hybrid_strategy_search.json")
# Trailing evaluation windows as (days, label) pairs.
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)
# Calendar-year evaluation periods as (label, start, exclusive end).
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)
# Candidate modes for the MOMENTUM_EXPANSION regime (filter + guard flag).
EXPANSION_MODES = {
    "prev_static": {"filter_name": "prev_balanced", "guarded": False},
    "guarded_static": {"filter_name": "guarded_positive", "guarded": False},
    "guarded_switch": {"filter_name": "prev_balanced", "guarded": True},
    "overheat_static": {"filter_name": "overheat_tolerant", "guarded": False},
}
# Candidate modes for the EUPHORIC_BREAKOUT regime (filter + guard flag).
EUPHORIA_MODES = {
    "overheat_static": {"filter_name": "overheat_tolerant", "guarded": False},
    "guarded_static": {"filter_name": "guarded_euphoria", "guarded": False},
    "guarded_switch": {"filter_name": "overheat_tolerant", "guarded": True},
    "prev_static": {"filter_name": "prev_balanced", "guarded": False},
}
# Candidate engines for the adverse regimes.
CAP_ENGINES = ("cap_cash", "cap_btc_rebound")
CHOP_ENGINES = ("chop_cash", "chop_inverse_carry", "chop_inverse_carry_strict")
DIST_ENGINES = ("dist_cash", "dist_inverse_carry_strict")
@dataclass(frozen=True, slots=True)
class HybridCandidate:
regime_profile: str
expansion_mode: str
euphoria_mode: str
cap_engine: str
chop_engine: str
dist_engine: str
@property
def name(self) -> str:
return (
f"{self.regime_profile}"
f"|exp:{self.expansion_mode}"
f"|eup:{self.euphoria_mode}"
f"|cap:{self.cap_engine}"
f"|chop:{self.chop_engine}"
f"|dist:{self.dist_engine}"
)
def _annualized_return(total_return: float, days: int) -> float:
if days <= 0:
return 0.0
return (1.0 + total_return) ** (365.0 / days) - 1.0
def _segment_curve(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> pd.Series:
segment = curve.loc[(curve.index >= start) & (curve.index <= end)].copy()
if segment.empty:
return segment
base = float(segment.iloc[0])
if base <= 0:
return pd.Series(dtype=float)
return segment / base * 1000.0
def _segment_metrics(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> dict[str, float]:
    """Compute return/risk metrics over the [start, end] slice of *curve*.

    Reports total and annualized return, Sharpe (6 bars/day), and max drawdown;
    slices with fewer than two points get zeroed metrics.
    """
    segment = _segment_curve(curve, start, end)
    if len(segment) < 2:
        return {
            "start": str(start),
            "end": str(end),
            "total_return": 0.0,
            "annualized_return": 0.0,
            "sharpe": 0.0,
            "max_drawdown": 0.0,
        }
    total_return = float(segment.iloc[-1] / segment.iloc[0] - 1.0)
    # At least one day to keep annualization finite.
    elapsed_days = max(int((end - start) / pd.Timedelta(days=1)), 1)
    return {
        "start": str(start),
        "end": str(end),
        "total_return": total_return,
        "annualized_return": _annualized_return(total_return, elapsed_days),
        "sharpe": sharpe_ratio(segment, 6),
        "max_drawdown": max_drawdown(segment),
    }
def _score_candidate(window_results: dict[str, dict[str, float]], year_results: dict[str, dict[str, float]]) -> tuple[float, int, int]:
year_returns = [float(metrics["total_return"]) for metrics in year_results.values()]
negative_years = sum(ret < 0 for ret in year_returns)
mdd_violations = sum(float(metrics["max_drawdown"]) < -0.20 for metrics in window_results.values())
score = 0.0
score += 4.0 * float(window_results["5y"]["annualized_return"])
score += 2.2 * float(window_results["1y"]["annualized_return"])
score += 1.5 * float(window_results["2y"]["annualized_return"])
score += 1.2 * float(window_results["4y"]["annualized_return"])
score += 0.8 * float(window_results["3y"]["annualized_return"])
score += 1.5 * float(window_results["5y"]["sharpe"])
score += 0.8 * float(window_results["1y"]["sharpe"])
score += 2.0 * min(year_returns)
score += 1.0 * median(year_returns)
score += 0.75 * sum(max(ret, 0.0) for ret in year_returns)
score -= 3.0 * negative_years
score -= 0.75 * mdd_violations
for label in ("1y", "2y", "3y", "4y", "5y"):
dd = abs(float(window_results[label]["max_drawdown"]))
score -= max(0.0, dd - 0.20) * 4.0
return score, negative_years, mdd_violations
def _compose_full_curve(
*,
latest_bar: pd.Timestamp,
timestamps: list[pd.Timestamp],
regime_map: dict[pd.Timestamp, str],
component_returns: dict[str, pd.Series],
candidate: HybridCandidate,
) -> pd.Series:
equity = 1000.0
idx = [timestamps[0]]
vals = [equity]
for i in range(1, len(timestamps)):
signal_ts = timestamps[i - 1]
execution_ts = timestamps[i]
regime = regime_map.get(signal_ts, "")
if regime == "MOMENTUM_EXPANSION":
key = f"MOMENTUM_EXPANSION::{candidate.expansion_mode}"
elif regime == "EUPHORIC_BREAKOUT":
key = f"EUPHORIC_BREAKOUT::{candidate.euphoria_mode}"
elif regime == "CAPITULATION_STRESS":
key = candidate.cap_engine
elif regime == "CHOPPY_ROTATION":
key = candidate.chop_engine
elif regime == "DISTRIBUTION_DRIFT":
key = candidate.dist_engine
else:
key = ""
ret = float(component_returns.get(key, pd.Series(dtype=float)).get(execution_ts, 0.0))
equity *= max(0.0, 1.0 + ret)
idx.append(execution_ts)
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _exact_validate_candidate(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    candidate: HybridCandidate,
) -> dict[str, object]:
    """Re-backtest *candidate* independently for every window and year period.

    Unlike the approximate path (one full-history curve sliced per period),
    each period here rebuilds its own regime frame and component curves, so
    results carry no slice-rebasing bias.
    """
    def run_period(eval_start: pd.Timestamp, eval_end: pd.Timestamp) -> pd.Series:
        # 90 days of pre-history ahead of eval_start feed the regime frame.
        raw_start = eval_start - pd.Timedelta(days=90)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        regime_frame = build_strategic_regime_frame(sliced, eval_start, eval_end, profile=candidate.regime_profile)
        regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
        harness = AdverseRegimeResearchHarness(sliced, eval_end)
        component_returns: dict[str, pd.Series] = {}
        # Return series for every expansion mode (guarded modes get a filter plan).
        for mode_name, spec in EXPANSION_MODES.items():
            filter_plan = _build_positive_filter_plan(regime_frame, "MOMENTUM_EXPANSION") if spec["guarded"] else None
            curve = _run_static_component_curve(
                sliced=sliced,
                latest_bar=eval_end,
                eval_start=eval_start,
                regime_map=regime_map,
                active_regime="MOMENTUM_EXPANSION",
                filter_name=str(spec["filter_name"]),
                filter_plan=filter_plan,
            )
            component_returns[f"MOMENTUM_EXPANSION::{mode_name}"] = _curve_returns(curve)
        # Same for every euphoria mode.
        for mode_name, spec in EUPHORIA_MODES.items():
            filter_plan = _build_positive_filter_plan(regime_frame, "EUPHORIC_BREAKOUT") if spec["guarded"] else None
            curve = _run_static_component_curve(
                sliced=sliced,
                latest_bar=eval_end,
                eval_start=eval_start,
                regime_map=regime_map,
                active_regime="EUPHORIC_BREAKOUT",
                filter_name=str(spec["filter_name"]),
                filter_plan=filter_plan,
            )
            component_returns[f"EUPHORIC_BREAKOUT::{mode_name}"] = _curve_returns(curve)
        # Only the adverse engines this candidate actually selects.
        for engine_name in {candidate.cap_engine, candidate.chop_engine, candidate.dist_engine}:
            curve = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=regime_frame,
            )
            component_returns[engine_name] = _curve_returns(curve)
        # BTC's bar timestamps define the composition grid.
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        return _compose_full_curve(
            latest_bar=eval_end,
            timestamps=timestamps,
            regime_map=regime_map,
            component_returns=component_returns,
            candidate=candidate,
        )
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        eval_end = latest_bar
        eval_start = eval_end - pd.Timedelta(days=days)
        curve = run_period(eval_start, eval_end)
        window_results[label] = _segment_metrics(curve, eval_start, eval_end)
    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        eval_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        curve = run_period(start, eval_end)
        year_results[label] = _segment_metrics(curve, start, eval_end)
    ytd_start = pd.Timestamp("2026-01-01 00:00:00+00:00")
    year_results["2026_YTD"] = _segment_metrics(run_period(ytd_start, latest_bar), ytd_start, latest_bar)
    # Partial YTD year is reported but excluded from scoring.
    score, negative_years, mdd_violations = _score_candidate(window_results, {k: v for k, v in year_results.items() if k != "2026_YTD"})
    return {
        "candidate": asdict(candidate),
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": window_results,
        "years": year_results,
        "validation": "exact_independent_periods",
    }
def _build_profile_cache(profile_name: str) -> tuple[str, dict[str, object]]:
    """Precompute 5y component return curves for one regime profile.

    Loads the fixed-66 bundle from disk, builds the profile's regime map, and
    runs every expansion/euphoria mode plus every adverse engine once, so the
    approximate candidate search can compose curves without re-running them.
    Returns (profile_name, cache) for the process-pool caller.
    """
    bundle, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    eval_start = latest_bar - pd.Timedelta(days=1825)
    # 90 days of pre-history ahead of eval_start feed the regime frame.
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    regime_frame = build_strategic_regime_frame(sliced, eval_start, latest_bar, profile=profile_name)
    regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
    harness = AdverseRegimeResearchHarness(sliced, latest_bar)
    component_returns: dict[str, pd.Series] = {}
    for mode_name, spec in EXPANSION_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "MOMENTUM_EXPANSION") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=latest_bar,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="MOMENTUM_EXPANSION",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"MOMENTUM_EXPANSION::{mode_name}"] = _curve_returns(curve)
    for mode_name, spec in EUPHORIA_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "EUPHORIC_BREAKOUT") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=latest_bar,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="EUPHORIC_BREAKOUT",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"EUPHORIC_BREAKOUT::{mode_name}"] = _curve_returns(curve)
    # All adverse engines are cached (any candidate may select any of them).
    for engine_name in sorted(set(CAP_ENGINES) | set(CHOP_ENGINES) | set(DIST_ENGINES)):
        curve = _run_adverse_component_curve(
            eval_start=eval_start,
            engine_name=engine_name,
            harness=harness,
            regime_frame=regime_frame,
        )
        component_returns[engine_name] = _curve_returns(curve)
    # BTC's bar timestamps define the composition grid.
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
    return profile_name, {
        "regime_map": regime_map,
        "component_returns": component_returns,
        "timestamps": timestamps,
    }
def _exact_validate_candidate_worker(candidate_payload: dict[str, str]) -> dict[str, object]:
    """Process-pool entry point: rebuild the candidate and run exact validation."""
    bundle, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    candidate = HybridCandidate(**candidate_payload)
    return _exact_validate_candidate(bundle=bundle, latest_bar=latest_bar, candidate=candidate)
def main() -> None:
    """Search all hybrid candidates approximately, then exactly validate the
    top 3 and write the combined report to OUT_JSON.
    """
    bundle, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    profile_caches: dict[str, dict[str, object]] = {}
    ctx = mp.get_context("fork")
    # Build one component-curve cache per regime profile in parallel.
    with ProcessPoolExecutor(max_workers=min(3, len(STRATEGIC_REGIME_PROFILES)), mp_context=ctx) as executor:
        future_map = {executor.submit(_build_profile_cache, profile_name): profile_name for profile_name in STRATEGIC_REGIME_PROFILES}
        for future in as_completed(future_map):
            profile_name, cache = future.result()
            profile_caches[profile_name] = cache
            print(f"[cache] built {profile_name}", flush=True)
    # Every combination of regime profile and per-regime engine/mode.
    candidates = [
        HybridCandidate(*combo)
        for combo in itertools.product(
            STRATEGIC_REGIME_PROFILES.keys(),
            EXPANSION_MODES.keys(),
            EUPHORIA_MODES.keys(),
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
        )
    ]
    rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        cache = profile_caches[candidate.regime_profile]
        full_curve = _compose_full_curve(
            latest_bar=latest_bar,
            timestamps=cache["timestamps"],
            regime_map=cache["regime_map"],
            component_returns=cache["component_returns"],
            candidate=candidate,
        )
        # Approximate metrics: every period is a rebased slice of one curve.
        window_results = {
            label: _segment_metrics(full_curve, latest_bar - pd.Timedelta(days=days), latest_bar)
            for days, label in WINDOWS
        }
        year_results = {
            label: _segment_metrics(full_curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
            for label, start, end_exclusive in YEAR_PERIODS
        }
        year_results["2026_YTD"] = _segment_metrics(full_curve, pd.Timestamp("2026-01-01 00:00:00+00:00"), latest_bar)
        # Partial YTD year is reported but excluded from scoring.
        score, negative_years, mdd_violations = _score_candidate(window_results, {k: v for k, v in year_results.items() if k != "2026_YTD"})
        row = {
            "candidate": asdict(candidate),
            "name": candidate.name,
            "score": score,
            "negative_years": negative_years,
            "mdd_violations": mdd_violations,
            "windows": window_results,
            "years": year_results,
            "validation": "approx_full_curve_slice",
        }
        rows.append(row)
        print(
            f"[{idx:03d}/{len(candidates)}] {candidate.name} "
            f"score={score:.3f} neg_years={negative_years} mdd_viol={mdd_violations} "
            f"1y={window_results['1y']['total_return']*100:.2f}% "
            f"5y_ann={window_results['5y']['annualized_return']*100:.2f}%",
            flush=True,
        )
    # Best first: fewest negative years, fewest drawdown violations, highest score.
    rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    # Exact re-validation of the top 3 in parallel worker processes.
    with ProcessPoolExecutor(max_workers=min(3, len(rows[:3])), mp_context=ctx) as executor:
        future_map = {
            executor.submit(_exact_validate_candidate_worker, row["candidate"]): row["name"]
            for row in rows[:3]
        }
        exact_top = []
        for future in as_completed(future_map):
            result = future.result()
            exact_top.append(result)
            print(f"[exact] validated {future_map[future]}", flush=True)
    exact_top.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    payload = {
        "analysis": "strategy32_hybrid_strategy_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "regime_profiles": list(STRATEGIC_REGIME_PROFILES.keys()),
        "expansion_modes": list(EXPANSION_MODES.keys()),
        "euphoria_modes": list(EUPHORIA_MODES.keys()),
        "cap_engines": list(CAP_ENGINES),
        "chop_engines": list(CHOP_ENGINES),
        "dist_engines": list(DIST_ENGINES),
        "summary": rows[:20],
        "exact_top": exact_top,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nTop 5 approximate candidates", flush=True)
    for row in rows[:5]:
        print(
            row["name"],
            "score",
            round(float(row["score"]), 3),
            "neg_years",
            row["negative_years"],
            "mdd_viol",
            row["mdd_violations"],
            "2025",
            round(float(row["years"]["2025"]["total_return"]) * 100, 2),
            "2024",
            round(float(row["years"]["2024"]["total_return"]) * 100, 2),
            "1y",
            round(float(row["windows"]["1y"]["total_return"]) * 100, 2),
            "5y_ann",
            round(float(row["windows"]["5y"]["annualized_return"]) * 100, 2),
        )
    print("\nExact top candidates", flush=True)
    for row in exact_top:
        print(
            HybridCandidate(**row["candidate"]).name,
            "score",
            round(float(row["score"]), 3),
            "neg_years",
            row["negative_years"],
            "mdd_viol",
            row["mdd_violations"],
            "2025",
            round(float(row["years"]["2025"]["total_return"]) * 100, 2),
            "2024",
            round(float(row["years"]["2024"]["total_return"]) * 100, 2),
            "1y",
            round(float(row["windows"]["1y"]["total_return"]) * 100, 2),
            "5y_ann",
            round(float(row["windows"]["5y"]["annualized_return"]) * 100, 2),
        )
    print("\nwrote", OUT_JSON, flush=True)
if __name__ == "__main__":  # Allow running the research script directly.
    main()

View File

@@ -0,0 +1,281 @@
from __future__ import annotations
import argparse
import itertools
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict
from pathlib import Path

import pandas as pd
# Make the repository root importable when this file runs as a standalone script.
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
    sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import slice_bundle
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
from strategy32.research.hybrid_regime import (
_build_positive_filter_plan,
_curve_returns,
_run_adverse_component_curve,
_run_static_component_curve,
load_fixed66_bundle,
)
from strategy32.scripts.run_hybrid_strategy_search import (
CAP_ENGINES,
CHOP_ENGINES,
DIST_ENGINES,
EUPHORIA_MODES,
EXPANSION_MODES,
OUT_JSON,
WINDOWS,
YEAR_PERIODS,
HybridCandidate,
_build_profile_cache,
_compose_full_curve,
_score_candidate,
_segment_metrics,
)
from strategy32.scripts.run_regime_filter_analysis import STRATEGIC_REGIME_PROFILES, build_strategic_regime_frame
# Report path for the fast (parallelized) variant of the search.
FAST_OUT_JSON = Path("/tmp/strategy32_hybrid_strategy_search_fast.json")
# Start of the current year-to-date evaluation period.
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
def _build_candidate_rows(latest_bar: pd.Timestamp, profile_caches: dict[str, dict[str, object]]) -> list[dict[str, object]]:
    """Approximately score every hybrid candidate from cached component curves.

    Composes each candidate's full equity curve out of the per-profile caches,
    measures window and calendar-year metrics on slices of that single curve,
    and returns one row per candidate sorted best-first by
    (negative_years, mdd_violations, -score).
    """
    # Every combination of regime profile and per-regime engine/mode.
    # (Replaces the previous __import__("itertools") hack with the module import.)
    candidates = [
        HybridCandidate(*combo)
        for combo in itertools.product(
            STRATEGIC_REGIME_PROFILES.keys(),
            EXPANSION_MODES.keys(),
            EUPHORIA_MODES.keys(),
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
        )
    ]
    rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        cache = profile_caches[candidate.regime_profile]
        full_curve = _compose_full_curve(
            latest_bar=latest_bar,
            timestamps=cache["timestamps"],
            regime_map=cache["regime_map"],
            component_returns=cache["component_returns"],
            candidate=candidate,
        )
        # Approximate metrics: every period is a rebased slice of one curve.
        window_results = {
            label: _segment_metrics(full_curve, latest_bar - pd.Timedelta(days=days), latest_bar)
            for days, label in WINDOWS
        }
        year_results = {
            label: _segment_metrics(full_curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
            for label, start, end_exclusive in YEAR_PERIODS
        }
        year_results["2026_YTD"] = _segment_metrics(full_curve, YTD_START, latest_bar)
        # Partial YTD year is reported but excluded from scoring.
        score, negative_years, mdd_violations = _score_candidate(
            window_results,
            {k: v for k, v in year_results.items() if k != "2026_YTD"},
        )
        row = {
            "candidate": asdict(candidate),
            "name": candidate.name,
            "score": score,
            "negative_years": negative_years,
            "mdd_violations": mdd_violations,
            "windows": window_results,
            "years": year_results,
            "validation": "approx_full_curve_slice",
        }
        rows.append(row)
        print(
            f"[approx {idx:03d}/{len(candidates)}] {candidate.name} "
            f"score={score:.3f} neg_years={negative_years} mdd_viol={mdd_violations} "
            f"1y={window_results['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
            flush=True,
        )
    # Best first: fewest negative years, fewest drawdown violations, highest score.
    rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    return rows
def _period_specs(latest_bar: pd.Timestamp) -> list[tuple[str, str, pd.Timestamp, pd.Timestamp]]:
    """List every (kind, label, start, end) evaluation period for exact validation."""
    window_specs = [
        ("window", label, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    ]
    # Calendar years are clipped to the latest completed bar.
    year_specs = [
        ("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    ]
    return window_specs + year_specs + [("year", "2026_YTD", YTD_START, latest_bar)]
def _exact_period_worker(candidate_payload: dict[str, str], period_spec: tuple[str, str, str, str]) -> tuple[str, str, dict[str, float]]:
    """Worker: exactly backtest one candidate over one evaluation period.

    Rebuilds the bundle slice, regime frame, and component return curves for
    just this period, then returns (kind, label, metrics) so the parent can
    reassemble the window/year tables. Timestamps travel as strings because
    the spec crosses a process boundary.
    """
    kind, label, start_text, end_text = period_spec
    eval_start = pd.Timestamp(start_text)
    eval_end = pd.Timestamp(end_text)
    bundle, _ = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    candidate = HybridCandidate(**candidate_payload)
    # 90 days of pre-history ahead of eval_start feed the regime frame.
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, eval_end)
    regime_frame = build_strategic_regime_frame(sliced, eval_start, eval_end, profile=candidate.regime_profile)
    regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
    harness = AdverseRegimeResearchHarness(sliced, eval_end)
    component_returns: dict[str, pd.Series] = {}
    # Return series for every expansion mode (guarded modes get a filter plan).
    for mode_name, spec in EXPANSION_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "MOMENTUM_EXPANSION") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=eval_end,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="MOMENTUM_EXPANSION",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"MOMENTUM_EXPANSION::{mode_name}"] = _curve_returns(curve)
    # Same for every euphoria mode.
    for mode_name, spec in EUPHORIA_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "EUPHORIC_BREAKOUT") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=eval_end,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="EUPHORIC_BREAKOUT",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"EUPHORIC_BREAKOUT::{mode_name}"] = _curve_returns(curve)
    # Only the adverse engines this candidate actually selects.
    for engine_name in sorted({candidate.cap_engine, candidate.chop_engine, candidate.dist_engine}):
        curve = _run_adverse_component_curve(
            eval_start=eval_start,
            engine_name=engine_name,
            harness=harness,
            regime_frame=regime_frame,
        )
        component_returns[engine_name] = _curve_returns(curve)
    # BTC's bar timestamps define the composition grid.
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
    curve = _compose_full_curve(
        latest_bar=eval_end,
        timestamps=timestamps,
        regime_map=regime_map,
        component_returns=component_returns,
        candidate=candidate,
    )
    return kind, label, _segment_metrics(curve, eval_start, eval_end)
def _exact_validate_candidate_parallel(
    candidate: HybridCandidate,
    latest_bar: pd.Timestamp,
    *,
    max_workers: int,
) -> dict[str, object]:
    """Exactly validate one hybrid candidate across all independent periods in parallel.

    Each (kind, label, start, end) tuple from ``_period_specs`` is evaluated in
    its own forked worker via ``_exact_period_worker``; windows and calendar
    years are collected separately and then scored with ``_score_candidate``
    (the 2026_YTD period is reported but excluded from scoring).
    """
    # Timestamps are stringified so the spec tuples are cheap to pickle.
    period_specs = [
        (kind, label, str(start), str(end))
        for kind, label, start, end in _period_specs(latest_bar)
    ]
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    # "fork" lets workers inherit module globals without re-importing/rebuilding.
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(max_workers, len(period_specs)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(_exact_period_worker, asdict(candidate), period_spec): period_spec
            for period_spec in period_specs
        }
        for future in as_completed(future_map):
            kind, label, metrics = future.result()
            if kind == "window":
                window_results[label] = metrics
            else:
                year_results[label] = metrics
            print(
                f"[exact-period] {candidate.name} {label} "
                f"ret={metrics['total_return'] * 100:.2f}% "
                f"mdd={metrics['max_drawdown'] * 100:.2f}%",
                flush=True,
            )
    # Re-order results deterministically; as_completed yields in finish order.
    ordered_windows = {label: window_results[label] for _, label in WINDOWS}
    ordered_years = {label: year_results[label] for label, _, _ in YEAR_PERIODS}
    ordered_years["2026_YTD"] = year_results["2026_YTD"]
    # Score on full calendar years only; the partial YTD year is excluded.
    score, negative_years, mdd_violations = _score_candidate(
        ordered_windows,
        {k: v for k, v in ordered_years.items() if k != "2026_YTD"},
    )
    return {
        "candidate": asdict(candidate),
        "name": candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": ordered_windows,
        "years": ordered_years,
        "validation": "exact_independent_periods_parallel",
    }
def main() -> None:
    """CLI entry point for the fast hybrid-strategy search.

    Builds per-profile regime caches in parallel, ranks all candidate rows,
    exact-validates the top N candidates, and persists the results JSON plus a
    link file at the canonical ``OUT_JSON`` location.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--top-n", type=int, default=3)
    parser.add_argument("--exact-workers", type=int, default=6)
    parser.add_argument("--out", type=str, default=str(FAST_OUT_JSON))
    args = parser.parse_args()
    _, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    profile_caches: dict[str, dict[str, object]] = {}
    # Forked workers inherit module globals; one worker per regime profile.
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(3, len(STRATEGIC_REGIME_PROFILES)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(_build_profile_cache, profile_name): profile_name
            for profile_name in STRATEGIC_REGIME_PROFILES
        }
        for future in as_completed(future_map):
            profile_name, cache = future.result()
            profile_caches[profile_name] = cache
            print(f"[cache] built {profile_name}", flush=True)
    rows = _build_candidate_rows(latest_bar, profile_caches)
    exact_top: list[dict[str, object]] = []
    for row in rows[: args.top_n]:
        candidate = HybridCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        exact_top.append(
            _exact_validate_candidate_parallel(
                candidate,
                latest_bar,
                max_workers=args.exact_workers,
            )
        )
        # Bug fix: this completion log previously sat after the payload write
        # below, where it referenced the stale loop variable (raising NameError
        # when --top-n was 0 or no rows existed) and only named the last
        # candidate. It belongs here, once per validated candidate.
        print(f"[exact-done] {candidate.name}", flush=True)
    # Rank: fewest negative years, then fewest MDD violations, then best score.
    exact_top.sort(key=lambda item: (int(item["negative_years"]), int(item["mdd_violations"]), -float(item["score"])))
    payload = {
        "analysis": "strategy32_hybrid_strategy_search_fast",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(rows),
        "summary": rows[:20],
        "exact_top": exact_top,
    }
    Path(args.out).write_text(json.dumps(payload, indent=2), encoding="utf-8")
    # Link file so readers of the canonical OUT_JSON path find the real output.
    Path(OUT_JSON).write_text(
        json.dumps(
            {
                "analysis": "strategy32_hybrid_strategy_search_fast_link",
                "source": str(Path(args.out)),
            },
            indent=2,
        ),
        encoding="utf-8",
    )


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,54 @@
from __future__ import annotations
import json
import sys
from pathlib import Path
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
from strategy32.research.soft_router import evaluate_cash_overlay_exact, load_component_bundle
# Where the live-combo replay payload is written.
OUT_JSON = Path("/tmp/strategy32_live_combo_backtest.json")
# Pre-built fixed66 research bundle (pickled) used as the backtest universe.
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
def main() -> None:
    """Replay the live Strategy32 combo (core filter + cash overlay) on the
    cached fixed66 research bundle and persist the evaluation payload.

    Note: the research bundle uses a 4h execution proxy; the live 1h
    refinement is not replayed here (recorded in ``backtest_basis``).
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    # Single source of truth for the core config overrides. Previously this
    # literal was duplicated verbatim in the evaluate call and again in the
    # recorded backtest basis, inviting drift between the two copies.
    core_config_overrides = {
        **LIVE_STRATEGY_OVERRIDES,
        "hard_filter_refresh_cadence": "1d",
        "hard_filter_min_history_bars": 120,
        "hard_filter_lookback_bars": 30,
        "hard_filter_min_avg_dollar_volume": 50_000_000.0,
    }
    payload = evaluate_cash_overlay_exact(
        bundle=bundle,
        latest_bar=latest_bar,
        candidate=BEST_CASH_OVERLAY,
        cache_path=CACHE_PATH,
        max_workers=6,
        # Pass a copy so the recorded basis below cannot be mutated downstream.
        core_config_overrides=dict(core_config_overrides),
    )
    payload["backtest_basis"] = {
        "universe": "fixed66 cached bundle",
        "core_filter": "overheat_tolerant",
        "cash_overlay": payload["candidate"],
        "core_config_overrides": core_config_overrides,
        "execution_refinement_note": "4h proxy in research bundle; live 1h refinement is not replayed here",
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,62 @@
from __future__ import annotations
import argparse
import asyncio
import logging
import os
import sys
from pathlib import Path
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.env import load_dotenv
from strategy32.live.runtime import run_monitor
def _default_env_candidates() -> list[Path]:
return [
Path(__file__).resolve().parents[1] / ".env",
Path("/Volumes/SSD/workspace/money-bot/strategy11/.env"),
Path("/Volumes/SSD/workspace/money-bot/strategy7/engine_a_mm/.env"),
Path("/Volumes/SSD/workspace/money-bot/strategy7/engine_aa_mm/.env"),
]
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the Strategy32 live monitor runner."""
    cli = argparse.ArgumentParser(description="Run Strategy32 live paper/advisory monitor")
    cli.add_argument("--once", action="store_true", help="Run one cycle and exit")
    # String flags with environment-driven defaults.
    string_flags = (
        ("--runtime-dir", os.getenv("STRATEGY32_RUNTIME_DIR", "runtime")),
        ("--env-file", ""),
        ("--log-level", os.getenv("STRATEGY32_LOG_LEVEL", "INFO")),
    )
    for flag, fallback in string_flags:
        cli.add_argument(flag, type=str, default=fallback)
    return cli.parse_args()
def main() -> None:
    """Entry point: load env vars, configure logging, run the live monitor.

    Order matters here: the .env file must be loaded before anything reads
    credentials, and logging must be configured before ``run_monitor`` starts.
    """
    args = parse_args()
    if args.env_file:
        load_dotenv(args.env_file)
    else:
        # Fall back to the first existing default location, in priority order.
        for env_path in _default_env_candidates():
            if env_path.exists():
                load_dotenv(env_path)
                break
    runtime_dir = Path(args.runtime_dir)
    runtime_dir.mkdir(parents=True, exist_ok=True)
    # Mirror logs to stderr and to a file inside the runtime directory.
    handlers: list[logging.Handler] = [
        logging.StreamHandler(),
        logging.FileHandler(runtime_dir / "strategy32_live.log", encoding="utf-8"),
    ]
    logging.basicConfig(
        level=getattr(logging, args.log_level.upper(), logging.INFO),
        format="%(asctime)s %(levelname)-5s %(name)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=handlers,
    )
    asyncio.run(run_monitor(once=args.once, runtime_dir=args.runtime_dir))


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,166 @@
from __future__ import annotations
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
from strategy32.research.soft_router import (
build_cash_overlay_period_components,
compose_cash_overlay_curve,
segment_metrics,
load_component_bundle,
)
# Output location for the core-filter comparison payload.
OUT_JSON = Path("/tmp/strategy32_recent_core_filter_comparison.json")
# Pre-built fixed66 research bundle (pickled), shared by all worker processes.
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
# Evaluation periods: label -> callable(latest_bar) -> (start, end).
# The calendar-year entries ignore latest_bar; 2026_YTD is open-ended.
PERIODS = {
    "1y": lambda latest_bar: (latest_bar - pd.Timedelta(days=365), latest_bar),
    "2y": lambda latest_bar: (latest_bar - pd.Timedelta(days=730), latest_bar),
    "5y": lambda latest_bar: (latest_bar - pd.Timedelta(days=1825), latest_bar),
    "2024": lambda latest_bar: (pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2024-12-31 23:59:59+00:00")),
    "2025": lambda latest_bar: (pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2025-12-31 23:59:59+00:00")),
    "2026_YTD": lambda latest_bar: (pd.Timestamp("2026-01-01 00:00:00+00:00"), latest_bar),
}
# Core-filter variants compared against the same cash overlay. Each entry pins
# a named core filter plus the Strategy32 config overrides it runs with.
VARIANTS: dict[str, dict[str, object]] = {
    # Live configuration as currently deployed.
    "current_overheat": {
        "core_filter": "overheat_tolerant",
        "overrides": {
            **LIVE_STRATEGY_OVERRIDES,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
    # Previous balanced filter, with all fallbacks disabled.
    "prev_balanced": {
        "core_filter": "prev_balanced",
        "overrides": {
            "enable_liquidity_universe_fallback": False,
            "enable_momentum_filter_fallback": False,
            "enable_carry_score_fallback": False,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
    # Guarded-positive filter, with all fallbacks disabled.
    "guarded_positive": {
        "core_filter": "guarded_positive",
        "overrides": {
            "enable_liquidity_universe_fallback": False,
            "enable_momentum_filter_fallback": False,
            "enable_carry_score_fallback": False,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
    # Overheat filter with relaxed momentum / volume thresholds.
    "relaxed_overheat": {
        "core_filter": "overheat_tolerant",
        "overrides": {
            **LIVE_STRATEGY_OVERRIDES,
            "momentum_min_score": 0.58,
            "momentum_min_relative_strength": -0.03,
            "momentum_min_7d_return": 0.00,
            "universe_min_avg_dollar_volume": 75_000_000.0,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
}
def main() -> None:
    """Compare core-filter variants over all periods, fanning out to workers.

    Builds the (variant, period) task grid, evaluates each cell in a forked
    worker via ``_period_worker``, and writes the aggregated results JSON.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    output: dict[str, object] = {
        "latest_bar": str(latest_bar),
        "candidate": {
            "regime_profile": BEST_CASH_OVERLAY.regime_profile,
            "cap_engine": BEST_CASH_OVERLAY.cap_engine,
            "chop_engine": BEST_CASH_OVERLAY.chop_engine,
            "dist_engine": BEST_CASH_OVERLAY.dist_engine,
        },
        "note": "fixed66 cached bundle, 4h proxy execution, same cash-overlay with different core filters",
        "variants": {},
    }
    # Pre-register every variant so results can be slotted in as they finish.
    for name, spec in VARIANTS.items():
        output["variants"][name] = {
            "core_filter": spec["core_filter"],
            "overrides": spec["overrides"],
            "results": {},
        }
    # Full cross product of variants x evaluation periods.
    tasks: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
    for variant_name in VARIANTS:
        for label, period_fn in PERIODS.items():
            start, end = period_fn(latest_bar)
            tasks.append((variant_name, label, start, end))
    # "fork" lets workers inherit VARIANTS / BEST_CASH_OVERLAY without pickling.
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(6, len(tasks)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(
                _period_worker,
                CACHE_PATH,
                variant_name,
                label,
                str(start),
                str(end),
            ): (variant_name, label)
            for variant_name, label, start, end in tasks
        }
        for future in as_completed(future_map):
            variant_name, label = future_map[future]
            print(f"[done] {variant_name} {label}", flush=True)
            result = future.result()
            output["variants"][variant_name]["results"][label] = result
    OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
    print(json.dumps(output, indent=2))
    print(f"[saved] {OUT_JSON}")
def _period_worker(
    cache_path: str,
    variant_name: str,
    label: str,
    start_text: str,
    end_text: str,
) -> dict[str, float]:
    """Evaluate one (variant, period) cell inside a forked worker process.

    Reloads the cached bundle locally, builds the cash-overlay components for
    the requested variant's core filter, composes the equity curve, and
    returns its segment metrics.
    """
    bundle, _latest_bar = load_component_bundle(cache_path)
    variant_spec = VARIANTS[variant_name]
    period_start = pd.Timestamp(start_text)
    period_end = pd.Timestamp(end_text)
    overlay_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=period_start,
        eval_end=period_end,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=str(variant_spec["core_filter"]),
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=dict(variant_spec["overrides"]),
    )
    curve, _unused_weights = compose_cash_overlay_curve(candidate=BEST_CASH_OVERLAY, **overlay_components)
    return segment_metrics(curve, period_start, period_end)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,498 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
from strategy29.backtest.window_analysis import slice_bundle
from strategy29.common.constants import BTC_SYMBOL
from strategy29.signal.btc_regime import BTCRegimeEngine
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, Strategy32Config, build_engine_config, build_strategy32_config
from strategy32.data import build_strategy32_market_bundle
# Raw analysis payload destination.
OUT_JSON = Path("/tmp/strategy32_regime_filter_analysis.json")
# Human-readable Korean markdown report destination (Nextcloud share).
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/008_레짐별_필터적합도_분석.md")
@dataclass(slots=True)
class FilterVariant:
    """One universe-filter parameterization to compare across regimes."""

    key: str  # machine identifier used in payloads/reports
    label: str  # human-readable display name
    liquidity_floor: float  # minimum 24h quote volume for symbol discovery
    avg_dollar_floor: float  # minimum average dollar volume for the universe
    momentum_score: float  # minimum momentum score
    relative_strength: float  # minimum relative strength (presumably vs BTC — TODO confirm)
    ret7: float  # minimum 7-day return
    corr_cap: float  # maximum allowed pairwise correlation
    carry_edge: float  # minimum expected carry edge
@dataclass(frozen=True, slots=True)
class StrategicRegimeProfile:
    """Threshold set used by ``build_strategic_regime_frame`` to classify bars.

    Grouped by regime: panic_* feed CAPITULATION_STRESS, euphoria_* feed
    EUPHORIC_BREAKOUT, expansion_* feed MOMENTUM_EXPANSION, and
    distribution_* feed DISTRIBUTION_DRIFT. Bars matching none fall back to
    CHOPPY_ROTATION.
    """

    name: str
    # Panic (CAPITULATION_STRESS) thresholds.
    panic_atr: float
    panic_bar_return: float
    panic_breadth: float
    panic_funding: float
    # Euphoria (EUPHORIC_BREAKOUT) thresholds.
    euphoria_daily_gap: float
    euphoria_intraday_gap: float
    euphoria_breadth: float
    euphoria_breadth_persist: float
    euphoria_positive_ratio: float
    euphoria_funding_persist: float
    euphoria_funding: float
    euphoria_btc_7d: float
    # Expansion (MOMENTUM_EXPANSION) thresholds.
    expansion_daily_gap: float
    expansion_intraday_gap: float
    expansion_breadth: float
    expansion_breadth_persist: float
    expansion_atr: float
    expansion_min_funding: float
    expansion_btc_7d: float
    # Distribution (DISTRIBUTION_DRIFT) thresholds.
    distribution_daily_gap: float
    distribution_intraday_gap: float
    distribution_breadth: float
    distribution_positive_ratio: float
# Named threshold profiles: "base" is the reference; "tight_positive" requires
# stronger confirmation before flagging bullish regimes; "loose_positive"
# flags them earlier with weaker evidence.
STRATEGIC_REGIME_PROFILES: dict[str, StrategicRegimeProfile] = {
    "base": StrategicRegimeProfile(
        name="base",
        panic_atr=0.05,
        panic_bar_return=-0.05,
        panic_breadth=0.22,
        panic_funding=-0.00005,
        euphoria_daily_gap=0.05,
        euphoria_intraday_gap=0.015,
        euphoria_breadth=0.68,
        euphoria_breadth_persist=0.62,
        euphoria_positive_ratio=0.72,
        euphoria_funding_persist=0.66,
        euphoria_funding=0.00012,
        euphoria_btc_7d=0.10,
        expansion_daily_gap=0.02,
        expansion_intraday_gap=0.00,
        expansion_breadth=0.55,
        expansion_breadth_persist=0.52,
        expansion_atr=0.05,
        expansion_min_funding=-0.00003,
        expansion_btc_7d=0.0,
        distribution_daily_gap=0.00,
        distribution_intraday_gap=0.00,
        distribution_breadth=0.45,
        distribution_positive_ratio=0.45,
    ),
    "tight_positive": StrategicRegimeProfile(
        name="tight_positive",
        panic_atr=0.048,
        panic_bar_return=-0.048,
        panic_breadth=0.24,
        panic_funding=-0.00004,
        euphoria_daily_gap=0.055,
        euphoria_intraday_gap=0.018,
        euphoria_breadth=0.72,
        euphoria_breadth_persist=0.66,
        euphoria_positive_ratio=0.75,
        euphoria_funding_persist=0.70,
        euphoria_funding=0.00014,
        euphoria_btc_7d=0.12,
        expansion_daily_gap=0.028,
        expansion_intraday_gap=0.004,
        expansion_breadth=0.60,
        expansion_breadth_persist=0.57,
        expansion_atr=0.045,
        expansion_min_funding=0.0,
        expansion_btc_7d=0.02,
        distribution_daily_gap=0.005,
        distribution_intraday_gap=0.002,
        distribution_breadth=0.48,
        distribution_positive_ratio=0.48,
    ),
    "loose_positive": StrategicRegimeProfile(
        name="loose_positive",
        panic_atr=0.052,
        panic_bar_return=-0.055,
        panic_breadth=0.20,
        panic_funding=-0.00006,
        euphoria_daily_gap=0.045,
        euphoria_intraday_gap=0.012,
        euphoria_breadth=0.64,
        euphoria_breadth_persist=0.58,
        euphoria_positive_ratio=0.68,
        euphoria_funding_persist=0.62,
        euphoria_funding=0.00010,
        euphoria_btc_7d=0.08,
        expansion_daily_gap=0.015,
        expansion_intraday_gap=-0.002,
        expansion_breadth=0.50,
        expansion_breadth_persist=0.48,
        expansion_atr=0.055,
        expansion_min_funding=-0.00005,
        expansion_btc_7d=-0.01,
        distribution_daily_gap=-0.005,
        distribution_intraday_gap=-0.004,
        distribution_breadth=0.42,
        distribution_positive_ratio=0.42,
    ),
}
# Filter candidates under comparison. Positional args follow FilterVariant:
# (key, label, liquidity_floor, avg_dollar_floor, momentum_score,
#  relative_strength, ret7, corr_cap, carry_edge).
FILTER_VARIANTS = [
    FilterVariant("prev_balanced", "Legacy Balanced", 50_000_000.0, 50_000_000.0, 0.60, 0.00, 0.00, 0.70, 0.0),
    FilterVariant("prev_profit", "Legacy Profit", 50_000_000.0, 50_000_000.0, 0.65, 0.00, 0.00, 0.78, 0.0),
    FilterVariant("new_default", "New Durable", 100_000_000.0, 50_000_000.0, 0.65, 0.00, 0.00, 0.70, 0.0),
    FilterVariant("high_liq_breakout", "High-Liq Breakout", 100_000_000.0, 75_000_000.0, 0.65, 0.00, 0.02, 0.78, 0.001),
    FilterVariant("overheat_tolerant", "Overheat Tolerant", 100_000_000.0, 100_000_000.0, 0.60, -0.02, 0.02, 0.78, 0.0),
    FilterVariant("ultra_selective", "Ultra Selective", 100_000_000.0, 50_000_000.0, 0.70, 0.02, 0.00, 0.78, 0.0),
]
def _price_at(df: pd.DataFrame, timestamp: pd.Timestamp) -> float:
row = df.loc[df["timestamp"] == timestamp]
if row.empty:
return 0.0
return float(row["close"].iloc[-1])
def _funding_row(df: pd.DataFrame, timestamp: pd.Timestamp) -> pd.Series | None:
row = df.loc[df["timestamp"] == timestamp]
if row.empty:
return None
return row.iloc[-1]
def _daily_return(series: pd.Series, bars_back: int) -> pd.Series:
return series / series.shift(bars_back) - 1.0
def build_strategic_regime_frame(
    bundle,
    eval_start: pd.Timestamp,
    latest_bar: pd.Timestamp,
    profile: StrategicRegimeProfile | str = "base",
) -> pd.DataFrame:
    """Label every bar from *eval_start* onward with a strategic regime.

    Starts from the BTC regime engine's prepared frame, adds cross-sectional
    breadth/funding features computed over all non-BTC symbols in *bundle*,
    then classifies each bar into one of five regimes using *profile*'s
    thresholds (precedence: panic > euphoria > expansion > distribution,
    with CHOPPY_ROTATION as the fallback).

    Note: *latest_bar* is accepted but not referenced in the body; the frame
    simply ends where *bundle*'s data ends.
    """
    if isinstance(profile, str):
        profile = STRATEGIC_REGIME_PROFILES[profile]
    btc_prices = bundle.prices[BTC_SYMBOL]
    prepared = BTCRegimeEngine(build_engine_config().regime).prepare(btc_prices)
    prepared = prepared.loc[prepared["timestamp"] >= eval_start].copy()
    timestamps = prepared["timestamp"].tolist()
    breadths: list[float] = []
    mean_funding: list[float] = []
    positive_funding_ratio: list[float] = []
    mean_basis: list[float] = []
    btc_7d_return: list[float] = []
    btc_30d_return: list[float] = []
    # Per-bar cross-sectional features over all alt (non-BTC) symbols.
    for idx, ts in enumerate(timestamps):
        votes = []
        funding_vals = []
        basis_vals = []
        positive_votes = []
        for symbol, df in bundle.prices.items():
            if symbol == BTC_SYMBOL:
                continue
            # Breadth vote: is the symbol above its 20-span EMA? Requires at
            # least 10 of the trailing 30 bars to vote at all.
            hist = df.loc[df["timestamp"] <= ts].tail(30)
            if len(hist) >= 10:
                ema = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
                votes.append(float(hist["close"].iloc[-1] > ema))
            f_df = bundle.funding.get(symbol)
            if f_df is None:
                continue
            # Funding features from the trailing 6 funding observations.
            f_hist = f_df.loc[f_df["timestamp"] <= ts].tail(6)
            if f_hist.empty:
                continue
            funding_vals.append(float(f_hist["funding_rate"].mean()))
            basis_vals.append(float(f_hist["basis"].iloc[-1]))
            positive_votes.append(float((f_hist["funding_rate"] > 0).mean()))
        # Neutral fallbacks (0.5 breadth/ratio, 0.0 funding) when no data.
        breadths.append(sum(votes) / len(votes) if votes else 0.5)
        mean_funding.append(sum(funding_vals) / len(funding_vals) if funding_vals else 0.0)
        mean_basis.append(sum(basis_vals) / len(basis_vals) if basis_vals else 0.0)
        positive_funding_ratio.append(sum(positive_votes) / len(positive_votes) if positive_votes else 0.5)
        # 42 bars ≈ 7 days and 180 bars ≈ 30 days (assumes the 4h timeframe
        # used elsewhere in this research stack — TODO confirm).
        if idx >= 42:
            btc_7d_return.append(float(prepared["close"].iloc[idx] / prepared["close"].iloc[idx - 42] - 1.0))
        else:
            btc_7d_return.append(0.0)
        if idx >= 180:
            btc_30d_return.append(float(prepared["close"].iloc[idx] / prepared["close"].iloc[idx - 180] - 1.0))
        else:
            btc_30d_return.append(0.0)
    prepared["breadth"] = breadths
    prepared["mean_alt_funding"] = mean_funding
    prepared["mean_alt_basis"] = mean_basis
    prepared["positive_funding_ratio"] = positive_funding_ratio
    prepared["btc_7d_return"] = btc_7d_return
    prepared["btc_30d_return"] = btc_30d_return
    # Trend gaps: price relative to slow EMAs on daily and intraday horizons.
    prepared["daily_trend_gap"] = prepared["daily_close"] / prepared["daily_ema_slow"] - 1.0
    prepared["intraday_trend_gap"] = prepared["close"] / prepared["ema_slow"] - 1.0
    # Persistence: 18-bar rolling means smooth out single-bar spikes.
    prepared["breadth_persist"] = prepared["breadth"].rolling(18, min_periods=6).mean()
    prepared["funding_persist"] = prepared["positive_funding_ratio"].rolling(18, min_periods=6).mean()
    regimes: list[str] = []
    for row in prepared.itertuples(index=False):
        breadth = float(row.breadth)
        breadth_persist = float(row.breadth_persist) if pd.notna(row.breadth_persist) else breadth
        atr = float(row.atr_pct) if pd.notna(row.atr_pct) else 0.0
        bar_ret = float(row.bar_return) if pd.notna(row.bar_return) else 0.0
        daily_gap = float(row.daily_trend_gap) if pd.notna(row.daily_trend_gap) else 0.0
        intra_gap = float(row.intraday_trend_gap) if pd.notna(row.intraday_trend_gap) else 0.0
        avg_funding = float(row.mean_alt_funding)
        positive_ratio = float(row.positive_funding_ratio)
        funding_persist = float(row.funding_persist) if pd.notna(row.funding_persist) else positive_ratio
        btc_7d = float(row.btc_7d_return)
        # Panic: high volatility, a crash bar, or broad weakness with
        # negative funding.
        panic = (
            atr >= profile.panic_atr
            or bar_ret <= profile.panic_bar_return
            or (breadth <= profile.panic_breadth and avg_funding < profile.panic_funding)
        )
        # Euphoria: stretched trend with persistent breadth and funding heat.
        euphoria = (
            daily_gap > profile.euphoria_daily_gap
            and intra_gap > profile.euphoria_intraday_gap
            and breadth >= profile.euphoria_breadth
            and breadth_persist >= profile.euphoria_breadth_persist
            and positive_ratio >= profile.euphoria_positive_ratio
            and funding_persist >= profile.euphoria_funding_persist
            and (avg_funding >= profile.euphoria_funding or btc_7d >= profile.euphoria_btc_7d)
        )
        # Expansion: healthy uptrend without euphoric excess.
        expansion = (
            daily_gap > profile.expansion_daily_gap
            and intra_gap > profile.expansion_intraday_gap
            and breadth >= profile.expansion_breadth
            and breadth_persist >= profile.expansion_breadth_persist
            and atr < profile.expansion_atr
            and avg_funding > profile.expansion_min_funding
            and btc_7d > profile.expansion_btc_7d
            and not euphoria
        )
        # Distribution: weakening trend and/or cooling funding with
        # narrowing breadth.
        distribution = (
            (daily_gap < profile.distribution_daily_gap and breadth < profile.distribution_breadth)
            or (intra_gap < profile.distribution_intraday_gap and breadth < max(profile.distribution_breadth - 0.07, 0.0))
            or (avg_funding < 0.0 and positive_ratio < profile.distribution_positive_ratio and breadth < profile.distribution_breadth)
        )
        # Precedence: panic wins, then euphoria, expansion, distribution.
        if panic:
            regimes.append("CAPITULATION_STRESS")
        elif euphoria:
            regimes.append("EUPHORIC_BREAKOUT")
        elif expansion:
            regimes.append("MOMENTUM_EXPANSION")
        elif distribution:
            regimes.append("DISTRIBUTION_DRIFT")
        else:
            regimes.append("CHOPPY_ROTATION")
    prepared["strategic_regime"] = regimes
    return prepared.reset_index(drop=True)
def build_variant_config(variant: FilterVariant) -> Strategy32Config:
    """Materialize a Strategy32 config with *variant*'s filter thresholds applied
    on top of the V7 default profile."""
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.avg_dollar_floor
    cfg.momentum_min_score = variant.momentum_score
    cfg.momentum_min_relative_strength = variant.relative_strength
    cfg.momentum_min_7d_return = variant.ret7
    cfg.max_pairwise_correlation = variant.corr_cap
    cfg.carry_min_expected_edge = variant.carry_edge
    return cfg
def regime_metrics_from_equity(curve: pd.Series, regime_frame: pd.DataFrame, bars_per_day: int) -> dict[str, dict[str, float]]:
    """Split an equity curve by strategic regime and compute per-regime metrics.

    Bar returns are joined onto *regime_frame* by timestamp (missing bars count
    as 0.0 return), then each regime's bars are chained into their own equity
    sub-curve. Note the sub-curves treat a regime's bars as contiguous even
    when the regime occurs in disjoint spells.
    """
    returns = curve.pct_change().fillna(0.0).rename("equity_bar_return")
    frame = regime_frame.merge(returns.rename_axis("timestamp").reset_index(), on="timestamp", how="left").fillna({"equity_bar_return": 0.0})
    results: dict[str, dict[str, float]] = {}
    for regime, chunk in frame.groupby("strategic_regime"):
        # Compound this regime's bar returns into a standalone equity curve.
        eq = (1.0 + chunk["equity_bar_return"]).cumprod()
        eq.index = pd.Index(chunk["timestamp"], name="timestamp")
        total_return = float(eq.iloc[-1] - 1.0) if not eq.empty else 0.0
        results[str(regime)] = {
            "bars": int(len(chunk)),
            "bar_share": float(len(chunk) / len(frame)) if len(frame) else 0.0,
            "total_return": total_return,
            "sharpe": sharpe_ratio(eq, bars_per_day),
            "max_drawdown": max_drawdown(eq),
            "positive_bar_ratio": float((chunk["equity_bar_return"] > 0).mean()) if len(chunk) else 0.0,
        }
    return results
def regime_score(metrics: dict[str, float]) -> float:
    """Composite fitness of a filter within one regime (higher is better).

    Weights return-over-drawdown most heavily, then Sharpe, then raw return,
    plus a small bonus for regimes that occupy a larger share of bars. The
    drawdown denominator is floored at 0.01 to avoid division blow-ups.
    """
    ret = float(metrics["total_return"])
    drawdown = abs(float(metrics["max_drawdown"]))
    calmar_like = ret / max(drawdown, 0.01)
    return (
        1.8 * calmar_like
        + 0.8 * float(metrics["sharpe"])
        + 0.25 * ret
        + 0.15 * float(metrics["bar_share"])
    )
def main() -> None:
    """Run the regime/filter fit analysis end to end.

    Fetches a 5y (+warmup) market bundle, labels bars with strategic regimes,
    backtests every filter variant over the same slice, ranks variants per
    regime, and writes both a JSON payload and a Korean markdown report.
    """
    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # 5y evaluation window plus warmup and a small fetch buffer.
    start = end - pd.Timedelta(days=1825 + base.warmup_days + 14)
    # Discover with the loosest liquidity floor so every variant can filter
    # down from the same superset of symbols.
    bundle, latest_bar, accepted, rejected, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=min(variant.liquidity_floor for variant in FILTER_VARIANTS),
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    eval_start = latest_bar - pd.Timedelta(days=1825)
    raw_start = eval_start - pd.Timedelta(days=base.warmup_days)
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    regime_frame = build_strategic_regime_frame(sliced, eval_start, latest_bar)
    variant_rows = []
    regime_rankings: dict[str, list[dict[str, object]]] = {}
    # Backtest each filter variant on the identical data slice.
    for variant in FILTER_VARIANTS:
        cfg = build_variant_config(variant)
        backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
        backtester.engine_config.initial_capital = 1000.0
        result = backtester.run()
        # Align the equity curve to the regime frame's first bar.
        curve = result.equity_curve.loc[result.equity_curve.index >= regime_frame["timestamp"].iloc[0]]
        metrics = regime_metrics_from_equity(curve, regime_frame, backtester.engine_config.bars_per_day)
        total = {
            "total_return": float(result.total_return),
            "cagr": float(result.cagr),
            "sharpe": float(result.sharpe),
            "max_drawdown": float(result.max_drawdown),
            "total_trades": int(result.total_trades),
        }
        variant_rows.append(
            {
                "variant": asdict(variant),
                "total": total,
                "regimes": metrics,
            }
        )
        for regime_name, regime_metrics in metrics.items():
            regime_rankings.setdefault(regime_name, []).append(
                {
                    "variant_key": variant.key,
                    "variant_label": variant.label,
                    "score": regime_score(regime_metrics),
                    "metrics": regime_metrics,
                    "total": total,
                }
            )
    # Rank variants within each regime, best score first.
    for regime_name, rows in regime_rankings.items():
        rows.sort(key=lambda row: float(row["score"]), reverse=True)
    payload = {
        "strategy": "strategy32",
        "analysis": "regime_filter_fit",
        "profile": PROFILE_V7_DEFAULT,
        "latest_completed_bar": str(latest_bar),
        "accepted_symbols": accepted,
        "rejected_symbols": rejected,
        "quote_by_symbol": quote_by_symbol,
        "regime_distribution": (
            regime_frame["strategic_regime"]
            .value_counts(normalize=False)
            .sort_index()
            .rename_axis("regime")
            .reset_index(name="bars")
            .to_dict(orient="records")
        ),
        "variant_rows": variant_rows,
        "regime_rankings": regime_rankings,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    # Build the Korean markdown report line by line.
    lines = [
        "# Strategy32 레짐별 필터 적합도 분석",
        "",
        "## 1. 목적",
        "",
        "유니버스 필터를 단일 기본값으로 고정하지 않고, 전략적으로 나눈 `5개 레짐`마다 어떤 필터가 더 잘 작동하는지 본다.",
        "",
        "## 2. 전략형 5개 레짐",
        "",
        "- `MOMENTUM_EXPANSION`: 장기/단기 추세가 위이고 breadth가 넓게 살아있는 구간",
        "- `EUPHORIC_BREAKOUT`: breadth와 funding이 과열된 추세 확장 구간",
        "- `CHOPPY_ROTATION`: 뚜렷한 추세가 없고 자금이 순환하는 박스권",
        "- `DISTRIBUTION_DRIFT`: breadth와 funding이 식으며 약세로 기우는 구간",
        "- `CAPITULATION_STRESS`: 고변동/급락/광범위 붕괴 구간",
        "",
        "## 3. 비교한 필터 후보",
        "",
    ]
    for variant in FILTER_VARIANTS:
        lines.append(
            f"- `{variant.key}`: liq `${variant.liquidity_floor/1_000_000:.0f}M`, "
            f"avg `${variant.avg_dollar_floor/1_000_000:.0f}M`, "
            f"score `{variant.momentum_score:.2f}`, rs `{variant.relative_strength:.2f}`, "
            f"7d `{variant.ret7:.2f}`, corr `{variant.corr_cap:.2f}`"
        )
    lines.extend(["", "## 4. 레짐 분포", ""])
    dist_frame = pd.DataFrame(payload["regime_distribution"])
    if not dist_frame.empty:
        total_bars = int(dist_frame["bars"].sum())
        lines.append("| 레짐 | bars | 비중 |")
        lines.append("|---|---:|---:|")
        for row in dist_frame.itertuples(index=False):
            lines.append(f"| `{row.regime}` | `{row.bars}` | `{row.bars / total_bars:.1%}` |")
    lines.extend(["", "## 5. 레짐별 1위 필터", ""])
    for regime_name in sorted(regime_rankings):
        best = regime_rankings[regime_name][0]
        metrics = best["metrics"]
        lines.append(f"### {regime_name}")
        lines.append("")
        lines.append(f"- Best: `{best['variant_key']}` ({best['variant_label']})")
        lines.append(f"- Regime return: `{metrics['total_return'] * 100:.2f}%`")
        lines.append(f"- Regime MDD: `{metrics['max_drawdown'] * 100:.2f}%`")
        lines.append(f"- Regime Sharpe: `{metrics['sharpe']:.2f}`")
        lines.append(f"- Positive bar ratio: `{metrics['positive_bar_ratio']:.2%}`")
        lines.append("")
    lines.extend(["## 6. 필터별 전체 5y 결과", "", "| 필터 | 5y 수익률 | CAGR | MDD | Sharpe | 거래수 |", "|---|---:|---:|---:|---:|---:|"])
    for row in sorted(variant_rows, key=lambda item: float(item["total"]["cagr"]), reverse=True):
        total = row["total"]
        lines.append(
            f"| `{row['variant']['key']}` | `{total['total_return'] * 100:.2f}%` | `{total['cagr'] * 100:.2f}%` | "
            f"`{total['max_drawdown'] * 100:.2f}%` | `{total['sharpe']:.2f}` | `{total['total_trades']}` |"
        )
    lines.extend(["", "## 7. 해석", ""])
    for regime_name in sorted(regime_rankings):
        top_two = regime_rankings[regime_name][:2]
        summary = ", ".join(
            f"`{row['variant_key']}` ({row['metrics']['total_return'] * 100:.1f}%, MDD {row['metrics']['max_drawdown'] * 100:.1f}%)"
            for row in top_two
        )
        lines.append(f"- `{regime_name}`: 상위 후보는 {summary}")
    lines.extend(
        [
            "",
            "## 8. 원본 결과",
            "",
            f"- JSON: [{OUT_JSON}]({OUT_JSON})",
        ]
    )
    OUT_MD.write_text("\n".join(lines) + "\n", encoding="utf-8")
    print(f"wrote {OUT_JSON}")
    print(f"wrote {OUT_MD}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,296 @@
from __future__ import annotations
import json
import os
import sys
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import slice_bundle
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
from strategy32.research.soft_router import (
MacroScaleSpec,
build_cash_overlay_period_components,
compose_cash_overlay_curve,
load_component_bundle,
score_candidate,
segment_metrics,
)
# Pre-built fixed66 research bundle (pickled).
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
# Destination for the macro-scaling search payload.
OUT_JSON = Path("/tmp/strategy32_relaxed_macro_scaling_search.json")
# Live overrides with loosened momentum / volume thresholds.
RELAXED_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "momentum_min_score": 0.58,
    "momentum_min_relative_strength": -0.03,
    "momentum_min_7d_return": 0.00,
    "universe_min_avg_dollar_volume": 75_000_000.0,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}
# Live overrides as currently deployed (no relaxation).
CURRENT_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}
# Rolling evaluation windows: (days, label).
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)
# Calendar-year periods: (label, start, exclusive end). 2021 starts mid-March
# where the bundle's history begins.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)
# Start of the open-ended year-to-date period.
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
def _clip01(value: float) -> float:
return min(max(float(value), 0.0), 1.0)
def _ramp(value: float, start: float, end: float) -> float:
if end == start:
return 1.0 if value >= end else 0.0
if value <= start:
return 0.0
if value >= end:
return 1.0
return (value - start) / (end - start)
def _build_macro_scale_map(sliced_bundle, *, timestamps: list[pd.Timestamp], spec: MacroScaleSpec) -> pd.Series:
    """Build a per-bar exposure scale in [spec.floor, 1.0] from BTC's weekly trend.

    Two ramp signals are computed on weekly BTC closes — price-vs-slow-EMA gap
    and fast-EMA-vs-slow-EMA gap — blended by ``spec.close_weight``, scaled
    into [floor, 1.0], then forward-filled onto the bar *timestamps*. Bars
    before the first weekly value default to 1.0 (full exposure).
    """
    btc_prices = sliced_bundle.prices["BTC"]
    closes = btc_prices.set_index("timestamp")["close"].astype(float).sort_index()
    # Downsample: last close per day, then last daily close per week (Sunday).
    daily = closes.resample("1D").last().dropna()
    weekly = daily.resample("W-SUN").last().dropna()
    fast = weekly.ewm(span=spec.fast_weeks, adjust=False).mean()
    slow = weekly.ewm(span=spec.slow_weeks, adjust=False).mean()
    close_scale = (weekly / slow - 1.0).apply(lambda value: _ramp(float(value), spec.close_gap_start, spec.close_gap_full))
    fast_scale = (fast / slow - 1.0).apply(lambda value: _ramp(float(value), spec.fast_gap_start, spec.fast_gap_full))
    blended = spec.close_weight * close_scale + (1.0 - spec.close_weight) * fast_scale
    # Map the blended [0, 1] signal into [floor, 1.0].
    macro_scale = spec.floor + (1.0 - spec.floor) * blended.clip(0.0, 1.0)
    aligned = macro_scale.reindex(pd.DatetimeIndex(timestamps, name="timestamp"), method="ffill")
    return aligned.fillna(1.0).clip(spec.floor, 1.0).astype(float)
def _candidate_specs() -> list[MacroScaleSpec]:
    """Enumerate the macro-scale parameter grid.

    Nesting order (floor → close gap → fast gap → close weight) is kept
    so the emitted list order stays reproducible across runs.
    """
    floors = (0.25, 0.35, 0.45)
    close_gaps = ((-0.08, 0.02), (-0.06, 0.02), (-0.05, 0.04))
    fast_gaps = ((-0.04, 0.01), (-0.03, 0.02))
    close_weights = (0.55, 0.65)
    return [
        MacroScaleSpec(
            floor=floor,
            close_gap_start=close_gap_start,
            close_gap_full=close_gap_full,
            fast_gap_start=fast_gap_start,
            fast_gap_full=fast_gap_full,
            close_weight=close_weight,
        )
        for floor in floors
        for close_gap_start, close_gap_full in close_gaps
        for fast_gap_start, fast_gap_full in fast_gaps
        for close_weight in close_weights
    ]
def _collect_metrics(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Slice one equity curve into windows/years and score the candidate.

    Returns (window_metrics, year_metrics, score, negative_years,
    mdd_violations). The 2026 YTD segment is reported in the year metrics
    but excluded from scoring (only YEAR_PERIODS labels are scored).
    """
    window_results: dict[str, dict[str, float]] = {
        label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    }
    year_results: dict[str, dict[str, float]] = {
        # Year ends are exclusive; shave one second so the segment stays inside.
        label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    }
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    return window_results, year_results, score, negative_years, mdd_violations
def _evaluate_exact_sequential(
    bundle,
    latest_bar: pd.Timestamp,
    *,
    core_overrides: dict[str, object],
    macro_scale_spec: MacroScaleSpec | None,
) -> dict[str, object]:
    """Re-simulate every evaluation period independently and score the result.

    Unlike the approximate search (which slices a single full-history
    curve), each window/year here gets its own component build and curve
    composition starting at that period's own start, so there is no
    look-back leakage across periods.

    Args:
        bundle: cached market bundle accepted by
            ``build_cash_overlay_period_components``.
        latest_bar: most recent completed bar timestamp.
        core_overrides: core engine config overrides (e.g. overheat filter
            variants) applied to every period.
        macro_scale_spec: optional macro exposure-scaling spec; ``None``
            disables macro scaling.

    Returns:
        JSON-serializable dict with per-window/per-year metrics, the
        composite score, and the latest target weights row.
    """
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    # (kind, label, start, end) tuples: rolling windows, calendar years
    # (end made inclusive by subtracting one second), then current YTD.
    periods = [
        *(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar) for days, label in WINDOWS),
        *(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))) for label, start, end_exclusive in YEAR_PERIODS),
        ("year", "2026_YTD", YTD_START, latest_bar),
    ]
    latest_weights: list[dict[str, object]] = []
    for kind, label, start, end in periods:
        components = build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=core_overrides,
            macro_scale_spec=macro_scale_spec,
        )
        curve, weights = compose_cash_overlay_curve(candidate=BEST_CASH_OVERLAY, **components)
        metrics = segment_metrics(curve, start, end)
        if kind == "window":
            window_results[label] = metrics
        else:
            year_results[label] = metrics
        if label == "2026_YTD":
            # Keep the most recent weights row (timestamps stringified for JSON).
            latest_weights = weights.tail(1).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")
    # Score windows plus calendar years; YTD is reported but not scored.
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(BEST_CASH_OVERLAY),
        "core_overrides": core_overrides,
        "macro_scale_spec": asdict(macro_scale_spec) if macro_scale_spec is not None else None,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": window_results,
        "years": year_results,
        "latest_weights": latest_weights,
        "validation": "exact_independent_periods_cash_overlay_sequential",
    }
def main() -> None:
    """Search macro-scale specs on the relaxed-overheat core, then validate exactly.

    Flow: build components once for the 5y window, approximate-score every
    candidate spec by re-composing the same components, then (unless the
    STRATEGY32_SEARCH_ONLY env flag is set) run exact per-period
    evaluations for both overheat baselines and the best-found spec.
    Results are written to OUT_JSON and echoed to stdout.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    eval_start = latest_bar - pd.Timedelta(days=1825)
    # Slice with an extra 365 days of history so weekly EMAs are warmed up.
    sliced = slice_bundle(bundle, eval_start - pd.Timedelta(days=365), latest_bar)
    print("[phase] build relaxed core components", flush=True)
    # Components are built once with the relaxed overrides and reused for
    # every spec in the search — only the macro scale map changes per spec.
    relaxed_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
    )
    print("[phase] search macro specs", flush=True)
    search_rows: list[dict[str, object]] = []
    specs = _candidate_specs()
    for idx, spec in enumerate(specs, start=1):
        # NOTE(review): timestamps[:-1] — presumably the scale applies to
        # the next bar's return, so the final bar is dropped; confirm
        # against compose_cash_overlay_curve.
        macro_scale_map = _build_macro_scale_map(sliced, timestamps=relaxed_components["timestamps"][:-1], spec=spec)
        curve, _weights = compose_cash_overlay_curve(
            candidate=BEST_CASH_OVERLAY,
            timestamps=relaxed_components["timestamps"],
            score_frame=relaxed_components["score_frame"],
            core_returns=relaxed_components["core_returns"],
            core_exposure_frame=relaxed_components["core_exposure_frame"],
            cap_returns=relaxed_components["cap_returns"],
            chop_returns=relaxed_components["chop_returns"],
            dist_returns=relaxed_components["dist_returns"],
            macro_scale_map=macro_scale_map,
        )
        windows, years, score, negative_years, mdd_violations = _collect_metrics(curve, latest_bar)
        search_rows.append(
            {
                "macro_scale_spec": asdict(spec),
                "windows": windows,
                "years": years,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
            }
        )
        if idx % 6 == 0 or idx == len(specs):
            print(f"[search] {idx}/{len(specs)}", flush=True)
    # Rank by approximate score, best first.
    search_rows.sort(key=lambda row: float(row["score"]), reverse=True)
    top_search = search_rows[:5]
    search_only = os.getenv("STRATEGY32_SEARCH_ONLY", "").strip().lower() in {"1", "true", "yes", "on"}
    if search_only:
        # Fast path: dump approximate search results and skip exact runs.
        payload = {
            "analysis": "relaxed_overheat_macro_scaling_search",
            "mode": "search_only",
            "latest_bar": str(latest_bar),
            "core_filter": "relaxed_overheat",
            "candidate": asdict(BEST_CASH_OVERLAY),
            "search_top": top_search,
        }
        OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
        print(json.dumps(payload, indent=2))
        print(f"[saved] {OUT_JSON}")
        return
    print("[phase] exact baselines", flush=True)
    # Exact (per-period) baselines for both overheat-filter variants,
    # without any macro scaling.
    baselines = {
        "current_overheat": _evaluate_exact_sequential(
            bundle,
            latest_bar,
            core_overrides=CURRENT_OVERHEAT_OVERRIDES,
            macro_scale_spec=None,
        ),
        "relaxed_overheat": _evaluate_exact_sequential(
            bundle,
            latest_bar,
            core_overrides=RELAXED_OVERHEAT_OVERRIDES,
            macro_scale_spec=None,
        ),
    }
    # Re-run the best approximate spec with the exact evaluator.
    best_spec = MacroScaleSpec(**top_search[0]["macro_scale_spec"])
    print(f"[phase] exact best spec {best_spec.name}", flush=True)
    best_exact = _evaluate_exact_sequential(
        bundle,
        latest_bar,
        core_overrides=RELAXED_OVERHEAT_OVERRIDES,
        macro_scale_spec=best_spec,
    )
    payload = {
        "analysis": "relaxed_overheat_macro_scaling_search",
        "latest_bar": str(latest_bar),
        "core_filter": "relaxed_overheat",
        "candidate": asdict(BEST_CASH_OVERLAY),
        "baselines": baselines,
        "search_top": top_search,
        "best_exact": best_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")
# Script entry point: run the search when executed directly.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,319 @@
from __future__ import annotations
import itertools
import json
import sys
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import slice_bundle
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
from strategy32.research.hybrid_regime import _curve_returns, _run_adverse_component_curve
from strategy32.research.soft_router import (
WINDOWS,
YEAR_PERIODS,
YTD_START,
SoftRouterCandidate,
build_regime_score_frame,
compose_soft_router_curve,
evaluate_candidate_exact,
load_component_bundle,
score_candidate,
segment_metrics,
)
from strategy32.research.hybrid_regime import STATIC_FILTERS
# Machine-readable search output.
OUT_JSON = Path("/tmp/strategy32_soft_router_search.json")
# Human-readable Markdown report (path and Korean filename kept verbatim).
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/015_soft_router_탐색결과.md")
# Search grid axes: regime profiles, core filters, adverse-engine names,
# then the continuous weight/floor levels combined via itertools.product.
PROFILES = ("loose_positive",)
CORE_FILTERS = ("overheat_tolerant", "prev_balanced")
CAP_ENGINES = ("cap_btc_rebound",)
CHOP_ENGINES = ("chop_inverse_carry", "chop_inverse_carry_strict")
DIST_ENGINES = ("dist_inverse_carry_strict",)
CORE_FLOORS = (0.00, 0.10, 0.20)
CAP_MAX_WEIGHTS = (0.20, 0.35, 0.50)
CHOP_MAX_WEIGHTS = (0.10, 0.20, 0.35)
DIST_MAX_WEIGHTS = (0.10, 0.20, 0.35)
CHOP_BLEND_FLOORS = (0.00, 0.10, 0.20)
def _evaluate_from_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Approximate scoring: slice one full-run equity curve per window/year.

    Returns (window_metrics, year_metrics, score, negative_years,
    mdd_violations). The 2026 YTD slice is reported but excluded from the
    score.
    """
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        window_results[label] = segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Exclusive year end, pulled back one second to stay inside the year.
        year_results[label] = segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    scored_years = {k: v for k, v in year_results.items() if k != "2026_YTD"}
    score, negative_years, mdd_violations = score_candidate(window_results, scored_years)
    return window_results, year_results, score, negative_years, mdd_violations
def _exact_static_variant(bundle, latest_bar: pd.Timestamp, filter_name: str) -> dict[str, object]:
    """Exactly backtest one static core filter over every window/year segment.

    Fixes over the previous version: the redundant function-local
    ``from strategy29.backtest.window_analysis import slice_bundle``
    statements (the name is already imported at module level) are removed,
    and the three copy-pasted backtest blocks are folded into one helper.

    Args:
        bundle: cached market bundle accepted by ``slice_bundle``.
        latest_bar: most recent completed bar timestamp.
        filter_name: key into STATIC_FILTERS selecting the core filter.

    Returns:
        JSON-serializable dict with per-window/per-year metrics and the
        composite score (YTD reported but excluded from scoring).
    """

    def _segment_curve(trade_start: pd.Timestamp, eval_end: pd.Timestamp) -> pd.Series:
        # One isolated backtest: 90-day warmup before trade_start, equity
        # curve trimmed back to the evaluation period.
        sliced = slice_bundle(bundle, trade_start - pd.Timedelta(days=90), eval_end)
        cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
        backtester = Strategy32Backtester(cfg, sliced, trade_start=trade_start)
        backtester.engine_config.initial_capital = 1000.0
        return backtester.run().equity_curve.loc[lambda s: s.index >= trade_start]

    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        eval_start = latest_bar - pd.Timedelta(days=days)
        window_results[label] = segment_metrics(_segment_curve(eval_start, latest_bar), eval_start, latest_bar)
    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Exclusive year end, pulled back one second to stay inside the year.
        eval_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        year_results[label] = segment_metrics(_segment_curve(start, eval_end), start, eval_end)
    year_results["2026_YTD"] = segment_metrics(_segment_curve(YTD_START, latest_bar), YTD_START, latest_bar)
    score, negative_years, mdd_violations = score_candidate(
        window_results,
        {k: v for k, v in year_results.items() if k != "2026_YTD"},
    )
    return {
        "name": filter_name,
        "windows": window_results,
        "years": year_results,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "validation": "exact_static_variant",
    }
def main() -> None:
    """Grid-search soft-router candidates and write JSON + Markdown reports.

    Pipeline:
      1. Cache per-profile component return series (core engines once each,
         adverse engines once each) so the candidate grid never re-simulates.
      2. Approximate every candidate by composing the cached returns over
         one full-history run and slicing.
      3. Re-simulate the top-3 candidates exactly, per period.
      4. Compare against exact static-core baselines.
      5. Persist results to OUT_JSON and a Markdown summary to OUT_MD.
    """
    bundle, latest_bar = load_component_bundle()
    eval_start = latest_bar - pd.Timedelta(days=1825)
    # 90-day warmup ahead of the 5-year evaluation window.
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    precomputed: dict[str, object] = {"profiles": {}}
    for profile_name in PROFILES:
        score_frame = build_regime_score_frame(sliced, eval_start, latest_bar, profile_name=profile_name)
        harness = AdverseRegimeResearchHarness(sliced, latest_bar)
        # Bar grid used for curve composition: every BTC bar at/after eval_start.
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        core_returns: dict[str, pd.Series] = {}
        adverse_returns: dict[str, pd.Series] = {}
        for core_filter in CORE_FILTERS:
            cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[core_filter])
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            core_curve = backtester.run().equity_curve.loc[lambda s: s.index >= eval_start]
            core_returns[core_filter] = _curve_returns(core_curve)
            print(f"[cache core] {profile_name}|{core_filter}", flush=True)
        # Each adverse engine is simulated once and reused for every role
        # (cap/chop/dist) it appears in.
        for engine_name in sorted(set(CAP_ENGINES) | set(CHOP_ENGINES) | set(DIST_ENGINES)):
            adverse_curve = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=score_frame,
            )
            adverse_returns[engine_name] = _curve_returns(adverse_curve)
            print(f"[cache adverse] {profile_name}|{engine_name}", flush=True)
        precomputed["profiles"][profile_name] = {
            "score_frame": score_frame,
            "timestamps": timestamps,
            "core_returns": core_returns,
            "adverse_returns": adverse_returns,
        }
    # Full cartesian product of the search grid; argument order must match
    # SoftRouterCandidate's field order.
    candidates = [
        SoftRouterCandidate(*combo)
        for combo in itertools.product(
            PROFILES,
            CORE_FILTERS,
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
            CORE_FLOORS,
            CAP_MAX_WEIGHTS,
            CHOP_MAX_WEIGHTS,
            DIST_MAX_WEIGHTS,
            CHOP_BLEND_FLOORS,
        )
    ]
    approx_rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        profile_cache = precomputed["profiles"][candidate.regime_profile]
        components = {
            "timestamps": profile_cache["timestamps"],
            "score_frame": profile_cache["score_frame"],
            "core_returns": profile_cache["core_returns"][candidate.core_filter],
            "cap_returns": profile_cache["adverse_returns"][candidate.cap_engine],
            "chop_returns": profile_cache["adverse_returns"][candidate.chop_engine],
            "dist_returns": profile_cache["adverse_returns"][candidate.dist_engine],
        }
        curve, weights = compose_soft_router_curve(candidate=candidate, **components)
        window_results, year_results, score, negative_years, mdd_violations = _evaluate_from_curve(curve, latest_bar)
        approx_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "windows": window_results,
                "years": year_results,
                "avg_weights": {
                    "core": float(weights["core_weight"].mean()),
                    "cap": float(weights["cap_weight"].mean()),
                    "chop": float(weights["chop_weight"].mean()),
                    "dist": float(weights["dist_weight"].mean()),
                    "cash": float(weights["cash_weight"].mean()),
                },
                "validation": "approx_full_curve_slice",
            }
        )
        if idx % 100 == 0 or idx == len(candidates):
            print(
                f"[approx {idx:04d}/{len(candidates)}] top={approx_rows[-1]['name']} "
                f"1y={window_results['1y']['total_return'] * 100:.2f}% "
                f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
                flush=True,
            )
    # Rank: fewest negative years, then fewest MDD violations, then score desc.
    approx_rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    exact_top = []
    for row in approx_rows[:3]:
        candidate = SoftRouterCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        result = evaluate_candidate_exact(bundle=bundle, latest_bar=latest_bar, candidate=candidate)
        exact_top.append(result)
        # Re-sort after each append so exact_top stays ranked as it grows.
        exact_top.sort(key=lambda item: (int(item["negative_years"]), int(item["mdd_violations"]), -float(item["score"])))
        print(
            f"[exact] {candidate.name} 1y={result['windows']['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={result['windows']['5y']['annualized_return'] * 100:.2f}% "
            f"neg={result['negative_years']} mdd_viol={result['mdd_violations']}",
            flush=True,
        )
    static_exact = [_exact_static_variant(bundle, latest_bar, filter_name) for filter_name in CORE_FILTERS]
    payload = {
        "analysis": "strategy32_soft_router_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "component_cache_count": sum(
            len(profile_cache["core_returns"]) + len(profile_cache["adverse_returns"])
            for profile_cache in precomputed["profiles"].values()
        ),
        "summary": approx_rows[:20],
        "exact_top": exact_top,
        "exact_static": static_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    # Markdown report (Korean section headers are project convention).
    lines = [
        "# Strategy32 Soft Router 탐색결과",
        "",
        "## 1. 목적",
        "",
        "`5개 하드 레짐 -> 1엔진 선택` 구조를 버리고, `정적 코어 엔진 + adverse overlay` 구조를 연속형 점수 기반으로 탐색한다.",
        "",
        "## 2. 탐색 범위",
        "",
        f"- profiles: `{', '.join(PROFILES)}`",
        f"- core filters: `{', '.join(CORE_FILTERS)}`",
        f"- cap engines: `{', '.join(CAP_ENGINES)}`",
        f"- chop engines: `{', '.join(CHOP_ENGINES)}`",
        f"- dist engines: `{', '.join(DIST_ENGINES)}`",
        f"- total candidates: `{len(candidates)}`",
        "",
        "## 3. exact 상위 후보",
        "",
        "| rank | candidate | 1y | 2y ann | 3y ann | 4y ann | 5y ann | 5y MDD | 2025 | 2024 |",
        "|---|---|---:|---:|---:|---:|---:|---:|---:|---:|",
    ]
    for idx, row in enumerate(exact_top, start=1):
        lines.append(
            f"| `{idx}` | `{row['name']}` | `{row['windows']['1y']['total_return'] * 100:.2f}%` | "
            f"`{row['windows']['2y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['3y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['4y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['max_drawdown'] * 100:.2f}%` | "
            f"`{row['years']['2025']['total_return'] * 100:.2f}%` | "
            f"`{row['years']['2024']['total_return'] * 100:.2f}%` |"
        )
    lines.extend(
        [
            "",
            "## 4. 정적 코어 exact 비교",
            "",
            "| core filter | 1y | 2y ann | 3y ann | 4y ann | 5y ann | 5y MDD | 2025 | 2024 |",
            "|---|---:|---:|---:|---:|---:|---:|---:|---:|",
        ]
    )
    for row in static_exact:
        lines.append(
            f"| `{row['name']}` | `{row['windows']['1y']['total_return'] * 100:.2f}%` | "
            f"`{row['windows']['2y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['3y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['4y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['max_drawdown'] * 100:.2f}%` | "
            f"`{row['years']['2025']['total_return'] * 100:.2f}%` | "
            f"`{row['years']['2024']['total_return'] * 100:.2f}%` |"
        )
    lines.extend(
        [
            "",
            "## 5. 해석",
            "",
            "- soft router가 정적 코어보다 좋아지려면, adverse overlay가 `2024/2025 방어`를 만들어내면서 `5y CAGR`을 크게 훼손하지 않아야 한다.",
            "- exact 결과가 정적 코어보다 약하면, 현재 adverse overlay 신호 품질 또는 overlay weight 공식이 아직 최적이 아니라는 뜻이다.",
            "",
            "## 6. 원본 결과",
            "",
            f"- JSON: [{OUT_JSON}]({OUT_JSON})",
        ]
    )
    OUT_MD.write_text("\n".join(lines) + "\n", encoding="utf-8")
# Script entry point: run the search when executed directly.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,144 @@
from __future__ import annotations
import copy
import json
import sys
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
from strategy32.backtest.simulator import Strategy32Backtester
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
from strategy32.data import build_strategy32_market_bundle
WINDOWS = [(7, "1w"), (30, "1m"), (365, "1y"), (1095, "3y"), (1825, "5y")]
def balanced_score(results: dict[str, dict[str, float | int | str]]) -> float:
score = 0.0
for label, weight in (("1y", 1.0), ("3y", 1.0), ("5y", 1.2)):
annualized = float(results[label]["annualized_return"])
drawdown = abs(float(results[label]["max_drawdown"]))
score += weight * (annualized / max(drawdown, 0.01))
score += 0.15 * float(results["1m"]["total_return"])
return score
def main() -> None:
    """Backtest the v7 feature-flag variants over rolling windows and rank them.

    Fetches one shared market bundle, runs every variant (deep-copied base
    config with boolean flag overrides) across WINDOWS, prints per-window
    metrics, ranks variants by ``balanced_score``, and writes the full
    payload to /tmp as JSON.
    """
    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Enough history for the longest window plus warmup plus a small buffer.
    start = end - pd.Timedelta(days=max(days for days, _ in WINDOWS) + base.warmup_days + 14)
    # (variant name, boolean flag overrides applied on top of the v7 base).
    variants: list[tuple[str, dict[str, bool]]] = [
        ("v7_default", {}),
        ("v7_plus_expanded_hedge", {"enable_expanded_hedge": True}),
        ("v7_plus_max_holding_exit", {"enable_max_holding_exit": True}),
        ("v7_plus_expanded_hedge_plus_max_holding_exit", {"enable_expanded_hedge": True, "enable_max_holding_exit": True}),
    ]
    print("fetching bundle...")
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=base.auto_discover_symbols,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=base.discovery_min_quote_volume_24h,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    print("latest", latest_completed_bar)
    results: dict[str, dict[str, dict[str, float | int | str]]] = {}
    summary_rows: list[dict[str, float | int | str]] = []
    for name, overrides in variants:
        # Deep copy so flag overrides never leak between variants.
        cfg = copy.deepcopy(base)
        for attr, value in overrides.items():
            setattr(cfg, attr, value)
        variant_results = {}
        print(f"\nVARIANT {name}")
        for days, label in WINDOWS:
            eval_end = latest_completed_bar
            eval_start = eval_end - pd.Timedelta(days=days)
            # Include warmup history before the evaluated period.
            raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
            sliced = slice_bundle(bundle, raw_start, eval_end)
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            result = backtester.run()
            metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
            metrics["engine_pnl"] = result.engine_pnl
            metrics["total_trades"] = result.total_trades
            variant_results[label] = metrics
            print(
                label,
                "ret",
                round(float(metrics["total_return"]) * 100, 2),
                "mdd",
                round(float(metrics["max_drawdown"]) * 100, 2),
                "sharpe",
                round(float(metrics["sharpe"]), 2),
                "trades",
                metrics["trade_count"],
            )
        score = balanced_score(variant_results)
        results[name] = variant_results
        summary_rows.append(
            {
                "name": name,
                "balanced_score": score,
                "ret_1w": float(variant_results["1w"]["total_return"]),
                "ret_1m": float(variant_results["1m"]["total_return"]),
                "ret_1y": float(variant_results["1y"]["total_return"]),
                "ret_3y": float(variant_results["3y"]["total_return"]),
                "ret_5y": float(variant_results["5y"]["total_return"]),
                "mdd_1y": float(variant_results["1y"]["max_drawdown"]),
                "mdd_3y": float(variant_results["3y"]["max_drawdown"]),
                "mdd_5y": float(variant_results["5y"]["max_drawdown"]),
            }
        )
    # Best score first.
    summary_rows.sort(key=lambda row: float(row["balanced_score"]), reverse=True)
    payload = {
        "strategy": "strategy32",
        "analysis": "v7_branch_validation",
        "profile": PROFILE_V7_DEFAULT,
        "initial_capital": 1000.0,
        "auto_discover_symbols": base.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "requested_symbols": [] if base.auto_discover_symbols else base.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": base.timeframe,
        "results": results,
        "summary": summary_rows,
    }
    out = Path("/tmp/strategy32_v7_branch_validation.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nRanked variants")
    for row in summary_rows:
        print(
            row["name"],
            "score",
            round(float(row["balanced_score"]), 3),
            "1y",
            round(float(row["ret_1y"]) * 100, 2),
            "3y",
            round(float(row["ret_3y"]) * 100, 2),
            "5y",
            round(float(row["ret_5y"]) * 100, 2),
            "mdd5y",
            round(float(row["mdd_5y"]) * 100, 2),
        )
    print("\nwrote", out)
# Script entry point: run the validation when executed directly.
if __name__ == "__main__":
    main()