# (removed scraped file-viewer metadata: "170 lines · 5.9 KiB · Python")
from __future__ import annotations
|
|
|
|
import json
|
|
import multiprocessing as mp
|
|
import sys
|
|
from concurrent.futures import ProcessPoolExecutor, as_completed
|
|
from dataclasses import asdict
|
|
from pathlib import Path
|
|
|
|
import pandas as pd
|
|
|
|
# Make the repository root importable so the absolute `strategy32.*` imports
# below resolve when this file is executed directly as a script (rather than
# via `python -m`).
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
    sys.path.insert(0, str(PACKAGE_PARENT))
|
|
|
|
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
|
|
BASELINE_PATH,
|
|
BEST_CASH_OVERLAY,
|
|
CACHE_PATH,
|
|
CURRENT_OVERHEAT_OVERRIDES,
|
|
OUT_JSON as SEARCH_OUT_JSON,
|
|
RELAXED_OVERHEAT_OVERRIDES,
|
|
WINDOWS,
|
|
YEAR_PERIODS,
|
|
YTD_START,
|
|
HybridSwitchCandidate,
|
|
_compose_hybrid_curve,
|
|
)
|
|
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate, segment_metrics
|
|
|
|
|
|
# Output location for this exact-evaluation run.  Deliberately distinct from
# the search script's OUT_JSON (imported above as SEARCH_OUT_JSON), which this
# script only reads.
OUT_JSON = Path("/tmp/strategy32_current_relaxed_hybrid_exact.json")

# Fallback switch candidate used when SEARCH_OUT_JSON is absent or contains no
# "search_top" entries.  NOTE(review): these thresholds presumably mirror the
# best candidate found by the search script — confirm they stay in sync when
# the search is re-run.
BEST_SEARCH_CANDIDATE = HybridSwitchCandidate(
    positive_regimes=("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
    core_score_min=0.60,
    breadth_persist_min=0.50,
    funding_persist_min=0.55,
    panic_max=0.20,
    choppy_max=0.40,
    distribution_max=0.30,
)
|
|
|
|
|
|
def _baseline_summary() -> dict[str, object]:
    """Load the baseline payload and return per-variant "results" mappings.

    Reads BASELINE_PATH as JSON and extracts the ``results`` entry for the
    ``current_overheat`` and ``relaxed_overheat`` variants.
    """
    raw_text = BASELINE_PATH.read_text(encoding="utf-8")
    variants = json.loads(raw_text)["variants"]
    summary: dict[str, object] = {}
    for variant_name in ("current_overheat", "relaxed_overheat"):
        summary[variant_name] = variants[variant_name]["results"]
    return summary
|
|
|
|
|
|
def _period_specs(latest_bar: pd.Timestamp) -> list[tuple[str, str, pd.Timestamp, pd.Timestamp]]:
    """Build the (kind, label, start, end) evaluation periods.

    Windows are trailing spans ending at *latest_bar*; year periods are
    clamped so they never extend past *latest_bar*, with the exclusive end
    pulled back by one second.  A final "2026_YTD" year period runs from
    YTD_START to *latest_bar*.
    """
    window_specs = [
        ("window", label, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    ]
    one_second = pd.Timedelta(seconds=1)
    year_specs = [
        ("year", label, start, min(latest_bar, end_exclusive - one_second))
        for label, start, end_exclusive in YEAR_PERIODS
    ]
    ytd_spec = ("year", "2026_YTD", YTD_START, latest_bar)
    return window_specs + year_specs + [ytd_spec]
|
|
|
|
|
|
def _period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Evaluate one (kind, label) period inside a worker process.

    All arguments are picklable primitives: the component bundle is reloaded
    from *cache_path* and the switch candidate is rebuilt from
    *candidate_payload* so the worker is start-method agnostic.

    Returns ``(kind, label, metrics, latest_rows)``; *latest_rows* is
    populated only for the "2026_YTD" period (last five composed rows with
    stringified timestamps), and is empty otherwise.
    """
    bundle, _ = load_component_bundle(cache_path)
    candidate = HybridSwitchCandidate(**candidate_payload)
    start = pd.Timestamp(start_text)
    end = pd.Timestamp(end_text)

    def _build_components(overrides):
        # The "current" and "relaxed" variants share every setting except the
        # overheat overrides; keep the call in one place instead of two
        # 10-line duplicates.
        return build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=overrides,
        )

    current = _build_components(CURRENT_OVERHEAT_OVERRIDES)
    relaxed = _build_components(RELAXED_OVERHEAT_OVERRIDES)

    curve, rows = _compose_hybrid_curve(
        current_components=current,
        relaxed_components=relaxed,
        switch_candidate=candidate,
    )
    latest_rows: list[dict[str, object]] = []
    if label == "2026_YTD":
        # Stringify timestamps so the rows survive JSON serialization later.
        latest_rows = (
            rows.tail(5)
            .assign(timestamp=lambda df: df["timestamp"].astype(str))
            .to_dict(orient="records")
        )
    return kind, label, segment_metrics(curve, start, end), latest_rows
|
|
|
|
|
|
def main() -> None:
    """Evaluate the best hybrid switch candidate on every period and save a report.

    Loads the top candidate from the prior search output when available
    (falling back to BEST_SEARCH_CANDIDATE), evaluates all window/year
    periods in parallel worker processes, scores the aggregate results, and
    writes the full payload to OUT_JSON (also echoed to stdout).
    """
    best_candidate = BEST_SEARCH_CANDIDATE
    if SEARCH_OUT_JSON.exists():
        search_payload = json.loads(SEARCH_OUT_JSON.read_text(encoding="utf-8"))
        if search_payload.get("search_top"):
            best_candidate = HybridSwitchCandidate(**search_payload["search_top"][0]["candidate"])

    _, latest_bar = load_component_bundle(CACHE_PATH)
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_rows: list[dict[str, object]] = []

    specs = _period_specs(latest_bar)
    # Prefer "fork" (cheap — workers inherit the already-imported module) but
    # fall back to the platform default where fork is unavailable (e.g.
    # Windows).  Worker arguments are all picklable primitives, so any start
    # method works.
    preferred = "fork" if "fork" in mp.get_all_start_methods() else None
    ctx = mp.get_context(preferred)
    with ProcessPoolExecutor(max_workers=min(6, len(specs)), mp_context=ctx) as executor:
        # The worker echoes back its own (kind, label), so no future->spec
        # mapping is required — a plain list of futures suffices.
        futures = [
            executor.submit(
                _period_worker,
                CACHE_PATH,
                asdict(best_candidate),
                kind,
                label,
                str(start),
                str(end),
            )
            for kind, label, start, end in specs
        ]
        for future in as_completed(futures):
            kind_result, label_result, metrics, latest = future.result()
            if kind_result == "window":
                window_results[label_result] = metrics
            else:
                year_results[label_result] = metrics
            if latest:
                # Only the 2026_YTD worker returns non-empty latest rows.
                latest_rows = latest
            print(f"[done] {label_result}", flush=True)

    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    payload = {
        "analysis": "current_relaxed_hybrid_exact",
        "latest_bar": str(latest_bar),
        "candidate": asdict(best_candidate),
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_rows": latest_rows,
        "baselines": _baseline_summary(),
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")
|
|
|
|
|
|
# Script entry point; the guard also keeps worker processes that re-import
# this module (non-fork start methods) from re-running main().
if __name__ == "__main__":
    main()
|