Add blocker research and routing rename
This commit is contained in:
205
scripts/run_current_cash_ensemble_search.py
Normal file
205
scripts/run_current_cash_ensemble_search.py
Normal file
@@ -0,0 +1,205 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
# Make the package importable when this file is executed as a standalone script:
# parents[2] is three directory levels above this file — presumably the folder
# that contains the `strategy32` package (TODO confirm against the repo layout).
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
    sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.live.runtime import BEST_CASH_OVERLAY
|
||||
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle
|
||||
from strategy32.scripts.run_current_cash_learned_blocker import (
|
||||
CACHE_PATH,
|
||||
CURRENT_OVERHEAT_OVERRIDES,
|
||||
LearnedBlockerCandidate,
|
||||
_build_block_dataset,
|
||||
_build_regime_columns,
|
||||
_build_strategy_detail,
|
||||
_curve_from_returns,
|
||||
_metrics_for_curve,
|
||||
_simulate_candidate,
|
||||
)
|
||||
|
||||
|
||||
OUT_JSON = Path("/tmp/strategy32_current_cash_ensemble_search.json")
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class StaticBlendCandidate:
|
||||
blocker: LearnedBlockerCandidate
|
||||
baseline_weight: float
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return f"static|bw:{self.baseline_weight:.2f}|{self.blocker.name}"
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class RelativeBlendCandidate:
|
||||
blocker: LearnedBlockerCandidate
|
||||
lookback_bars: int
|
||||
threshold: float
|
||||
baseline_weight_high: float
|
||||
baseline_weight_mid: float
|
||||
baseline_weight_low: float
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return (
|
||||
f"relative|lb:{self.lookback_bars}|th:{self.threshold:.3f}"
|
||||
f"|hi:{self.baseline_weight_high:.2f}|mid:{self.baseline_weight_mid:.2f}|lo:{self.baseline_weight_low:.2f}"
|
||||
f"|{self.blocker.name}"
|
||||
)
|
||||
|
||||
|
||||
def _segment_score(windows: dict[str, dict[str, float]], years: dict[str, dict[str, float]]) -> tuple[float, int, int]:
    """Score a candidate from its rolling-window and calendar-year metrics.

    Returns ``(score, negative_year_count, mdd_violation_count)`` where the
    counts are the number of losing calendar years (2021-2025) and of windows
    whose max drawdown breaches -20%.
    """
    loss_years = sum(
        1 for yr in ("2021", "2022", "2023", "2024", "2025")
        if years[yr]["total_return"] < 0.0
    )
    deep_drawdowns = sum(
        1 for win in ("1y", "2y", "3y", "4y", "5y")
        if windows[win]["max_drawdown"] < -0.20
    )
    # Weighted return terms, summed in the same order as the original
    # accumulation so float results are bit-identical.
    weighted_terms = (
        4.0 * windows["1y"]["total_return"],
        2.0 * windows["2y"]["annualized_return"],
        1.5 * windows["3y"]["annualized_return"],
        2.5 * windows["5y"]["annualized_return"],
        0.75 * years["2025"]["total_return"],
        0.50 * years["2024"]["total_return"],
        0.20 * years["2026_YTD"]["total_return"],
        # NOTE(review): this term is positive only when the 5y max drawdown is
        # *deeper* than -15% (mdd < -0.15), i.e. it rewards worse drawdowns —
        # confirm the sign is intentional.
        0.25 * max(0.0, -0.15 - windows["5y"]["max_drawdown"]),
    )
    score = sum(weighted_terms) - 1.5 * loss_years - 0.4 * deep_drawdowns
    return score, loss_years, deep_drawdowns
|
||||
|
||||
|
||||
def _compute_metrics(returns: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Turn a per-bar return series into window/year metrics and a segment score.

    Returns ``(windows, years, score, negative_years, mdd_violations)``.
    """
    equity_curve = _curve_from_returns(returns)
    # _metrics_for_curve returns a 5-tuple; only the window and year metrics
    # are needed here.
    windows, years, *_ = _metrics_for_curve(equity_curve, latest_bar)
    score, loss_years, deep_drawdowns = _segment_score(windows, years)
    return windows, years, score, loss_years, deep_drawdowns
|
||||
|
||||
|
||||
def _static_blend_returns(
    baseline_returns: pd.Series,
    blocker_returns: pd.Series,
    candidate: StaticBlendCandidate,
) -> pd.Series:
    """Blend baseline and blocker returns with the candidate's fixed weight."""
    weight = candidate.baseline_weight
    weighted_baseline = baseline_returns.mul(weight)
    weighted_blocker = blocker_returns.mul(1.0 - weight)
    # fill_value=0.0 treats a bar missing on one side as a zero return there.
    return weighted_baseline.add(weighted_blocker, fill_value=0.0)
|
||||
|
||||
|
||||
def _relative_blend_returns(
    baseline_returns: pd.Series,
    blocker_returns: pd.Series,
    candidate: RelativeBlendCandidate,
) -> pd.Series:
    """Blend baseline and blocker returns via a trailing relative-performance router.

    Each bar's baseline weight is high / mid / low depending on whether the
    baseline's trailing return exceeds, roughly matches, or trails the
    blocker's by at least ``candidate.threshold``.
    """
    lookback = candidate.lookback_bars
    min_bars = max(6, lookback // 3)

    def trailing_sum(series: pd.Series) -> pd.Series:
        # shift(1) keeps the router causal: only returns known before each bar.
        return series.shift(1).rolling(lookback, min_periods=min_bars).sum()

    relative_edge = trailing_sum(baseline_returns).sub(trailing_sum(blocker_returns), fill_value=0.0).fillna(0.0)
    # Warm-up bars (NaN trailing sums on both sides) become 0.0 and fall
    # through the masks below; with threshold 0.0 that selects the low weight.
    baseline_weight = pd.Series(candidate.baseline_weight_mid, index=baseline_returns.index, dtype=float)
    baseline_weight = baseline_weight.mask(relative_edge >= candidate.threshold, candidate.baseline_weight_high)
    baseline_weight = baseline_weight.mask(relative_edge <= -candidate.threshold, candidate.baseline_weight_low)
    return baseline_returns.mul(baseline_weight).add(blocker_returns.mul(1.0 - baseline_weight), fill_value=0.0)
|
||||
|
||||
|
||||
def _baseline_detail(bundle: object, eval_start: pd.Timestamp, latest_bar: pd.Timestamp) -> pd.DataFrame:
    """Build the baseline cash-overlay components and the per-bar detail frame."""
    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
    )
    return _build_strategy_detail(components)


def _simulate_blockers(
    detail: pd.DataFrame,
    regime_columns: object,
    blocker_candidates: list[LearnedBlockerCandidate],
) -> dict[str, pd.Series]:
    """Simulate each blocker candidate, caching block datasets by block_bars."""
    returns_map: dict[str, pd.Series] = {}
    frame_cache: dict[int, pd.DataFrame] = {}
    for blocker in blocker_candidates:
        # Candidates sharing block_bars reuse the same (expensive) dataset.
        if blocker.block_bars not in frame_cache:
            frame_cache[blocker.block_bars] = _build_block_dataset(detail, blocker.block_bars, regime_columns)
        returns_map[blocker.name] = _simulate_candidate(detail, frame_cache[blocker.block_bars], regime_columns, blocker)
    return returns_map


def _candidate_grid(blocker_candidates: list[LearnedBlockerCandidate]) -> list[object]:
    """Enumerate every static and relative blend candidate for the search."""
    static_candidates = [
        StaticBlendCandidate(blocker=blocker, baseline_weight=weight)
        for blocker in blocker_candidates
        for weight in (0.50, 0.60, 0.70, 0.75, 0.80, 0.85, 0.90)
    ]
    relative_candidates = [
        RelativeBlendCandidate(
            blocker=blocker,
            lookback_bars=lookback,
            threshold=threshold,
            baseline_weight_high=high,
            baseline_weight_mid=mid,
            baseline_weight_low=low,
        )
        for blocker in blocker_candidates
        for lookback in (21, 42, 63)
        for threshold in (0.0, 0.01, 0.02)
        for high, mid, low in (
            (0.90, 0.75, 0.55),
            (0.85, 0.70, 0.50),
            (0.80, 0.65, 0.45),
        )
    ]
    return [*static_candidates, *relative_candidates]


def _search_top(
    all_candidates: list[object],
    baseline_returns: pd.Series,
    blocker_returns_map: dict[str, pd.Series],
    latest_bar: pd.Timestamp,
) -> list[dict[str, object]]:
    """Score every blend candidate, keeping only the 12 best by segment score."""
    top: list[dict[str, object]] = []
    for idx, candidate in enumerate(all_candidates, start=1):
        if isinstance(candidate, StaticBlendCandidate):
            returns = _static_blend_returns(baseline_returns, blocker_returns_map[candidate.blocker.name], candidate)
        else:
            returns = _relative_blend_returns(baseline_returns, blocker_returns_map[candidate.blocker.name], candidate)
        windows, years, score, negative_years, mdd_violations = _compute_metrics(returns, latest_bar)
        top.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "windows": windows,
                "years": years,
            }
        )
        # Sort-and-truncate every iteration keeps memory bounded at 12 payloads.
        top.sort(key=lambda item: float(item["score"]), reverse=True)
        top = top[:12]
        if idx % 25 == 0 or idx == len(all_candidates):
            print(f"[search] {idx}/{len(all_candidates)}", flush=True)
    return top


def main() -> None:
    """Run the ensemble blend search and write the top-12 report to OUT_JSON.

    Phases: load the cached component bundle, build the baseline detail frame,
    simulate the learned-blocker legs, enumerate blend candidates, score them,
    then write and print the report.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    # 1825 calendar days ~= a 5-year evaluation window.
    eval_start = latest_bar - pd.Timedelta(days=1825)
    print("[phase] build baseline/detail", flush=True)
    detail = _baseline_detail(bundle, eval_start, latest_bar)
    regime_columns = _build_regime_columns(detail)
    baseline_returns = detail.set_index("timestamp")["portfolio_return"].astype(float)

    blocker_candidates = [
        LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.25),
        LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.50),
    ]
    blocker_returns_map = _simulate_blockers(detail, regime_columns, blocker_candidates)

    baseline_windows, baseline_years, baseline_score, baseline_negative_years, baseline_mdd_violations = _compute_metrics(baseline_returns, latest_bar)

    all_candidates = _candidate_grid(blocker_candidates)
    print(f"[phase] ensemble search {len(all_candidates)} candidates", flush=True)
    top = _search_top(all_candidates, baseline_returns, blocker_returns_map, latest_bar)

    output = {
        "analysis": "current_cash_ensemble_search",
        "latest_bar": str(latest_bar),
        "baseline": {
            "score": baseline_score,
            "negative_years": baseline_negative_years,
            "mdd_violations": baseline_mdd_violations,
            "windows": baseline_windows,
            "years": baseline_years,
        },
        "top12": top,
    }
    OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
    print(json.dumps(top[:5], indent=2))
    print(f"[saved] {OUT_JSON}", flush=True)
|
||||
|
||||
|
||||
# Script entry point: run the ensemble search when executed directly.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user