Add block-router research and routing rename

2026-03-17 16:16:27 -07:00
parent c165a9add7
commit 88c7c7790d
20 changed files with 3417 additions and 3 deletions


@@ -0,0 +1,278 @@
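# Grid-searches a "block router" that, at fixed block boundaries, switches the
# portfolio between the current and relaxed overheat configurations based on
# the regime/score snapshot of the bar that opens each block.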
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import (
    build_cash_overlay_period_components,
    load_component_bundle,
    score_candidate,
    segment_metrics,
)
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
RELAXED_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
_baseline_summary,
_overlay_weights,
)
OUT_JSON = Path("/tmp/strategy32_current_relaxed_block_router.json")
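# One point in the search grid: entry gates evaluated on a block's trigger bar,
# plus block_bars, the number of bars the routing decision stays locked in.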
@dataclass(frozen=True, slots=True)
class BlockRouterCandidate:
positive_regimes: tuple[str, ...]
core_score_min: float
breadth_persist_min: float
funding_persist_min: float
panic_max: float
choppy_max: float
distribution_max: float
current_cash_min: float
block_bars: int
@property
def name(self) -> str:
regimes = ",".join(self.positive_regimes)
return (
f"regimes:{regimes}"
f"|core>={self.core_score_min:.2f}"
f"|breadth>={self.breadth_persist_min:.2f}"
f"|funding>={self.funding_persist_min:.2f}"
f"|panic<={self.panic_max:.2f}"
f"|choppy<={self.choppy_max:.2f}"
f"|dist<={self.distribution_max:.2f}"
f"|cash>={self.current_cash_min:.2f}"
f"|block:{self.block_bars}"
)
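# Replay one configuration bar by bar: the prior bar (signal) supplies the
# score snapshot and core cash level, the next bar (execution) supplies the
# core + cap/chop/dist return blended with the BEST_CASH_OVERLAY weights.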
def _build_strategy_detail(components: dict[str, object]) -> pd.DataFrame:
timestamps = list(components["timestamps"])
score_map = components["score_frame"].set_index("timestamp").sort_index()
cash_map = components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
core_returns = components["core_returns"]
cap_returns = components["cap_returns"]
chop_returns = components["chop_returns"]
dist_returns = components["dist_returns"]
rows: list[dict[str, object]] = []
for i in range(1, len(timestamps)):
signal_ts = pd.Timestamp(timestamps[i - 1])
execution_ts = pd.Timestamp(timestamps[i])
score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, core_cash_pct)
portfolio_return = (
float(core_returns.get(execution_ts, 0.0))
+ cap_weight * float(cap_returns.get(execution_ts, 0.0))
+ chop_weight * float(chop_returns.get(execution_ts, 0.0))
+ dist_weight * float(dist_returns.get(execution_ts, 0.0))
)
rows.append(
{
"timestamp": execution_ts,
"strategic_regime": str(score_row.get("strategic_regime", "")),
"core_score": float(score_row.get("core_score", 0.0)),
"panic_score": float(score_row.get("panic_score", 0.0)),
"choppy_score": float(score_row.get("choppy_score", 0.0)),
"distribution_score": float(score_row.get("distribution_score", 0.0)),
"breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
"funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
"core_cash_pct": core_cash_pct,
"portfolio_return": portfolio_return,
}
)
return pd.DataFrame(rows)
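# All gates AND together: a block routes to the relaxed config only when the
# trigger bar sits in an approved regime, core/breadth/funding clear their
# floors, panic/choppy/distribution stay under their caps, and current cash
# is at or above the minimum.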
def _pick_relaxed(row: pd.Series, candidate: BlockRouterCandidate) -> bool:
return (
str(row.get("strategic_regime", "")) in candidate.positive_regimes
and float(row.get("core_score", 0.0)) >= candidate.core_score_min
and float(row.get("breadth_persist", 0.0)) >= candidate.breadth_persist_min
and float(row.get("funding_persist", 0.0)) >= candidate.funding_persist_min
and float(row.get("panic_score", 0.0)) <= candidate.panic_max
and float(row.get("choppy_score", 0.0)) <= candidate.choppy_max
and float(row.get("distribution_score", 0.0)) <= candidate.distribution_max
and float(row.get("current_cash_pct", 0.0)) >= candidate.current_cash_min
)
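# Walk the merged detail frame in fixed-size blocks; the first bar of each
# block decides whether the entire block takes relaxed or current returns.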
def _compose_block_returns(detail: pd.DataFrame, candidate: BlockRouterCandidate) -> pd.Series:
returns: list[float] = []
idx: list[pd.Timestamp] = []
rows = detail.reset_index(drop=True)
for start in range(0, len(rows), candidate.block_bars):
end = min(start + candidate.block_bars, len(rows))
block = rows.iloc[start:end]
trigger = block.iloc[0]
use_relaxed = _pick_relaxed(trigger, candidate)
source_col = "relaxed_return" if use_relaxed else "current_return"
returns.extend(block[source_col].tolist())
idx.extend(block["timestamp"].tolist())
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
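# Compound returns into a 1000-unit equity curve, flooring each bar at total
# loss; the seed point is placed one 4h bar before the first return timestamp.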
def _curve_from_returns(returns: pd.Series) -> pd.Series:
equity = 1000.0
vals = [equity]
idx = [returns.index[0] - pd.Timedelta(hours=4)]
for ts, ret in returns.items():
equity *= max(0.0, 1.0 + float(ret))
idx.append(pd.Timestamp(ts))
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
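# Score a curve over the rolling lookback windows plus calendar years (with
# 2026 truncated to YTD), reusing the shared score_candidate penalty logic.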
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
windows = {
label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
for days, label in WINDOWS
}
years = {
label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
for label, start, end_exclusive in YEAR_PERIODS
}
years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
score, negative_years, mdd_violations = score_candidate(
{label: windows[label] for _, label in WINDOWS},
{label: years[label] for label, _, _ in YEAR_PERIODS},
)
return windows, years, score, negative_years, mdd_violations
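# Full cartesian grid: 3 regime sets x 3 core x 3 breadth x 3 funding x
# 2 panic x 3 choppy x 3 distribution x 3 cash x 3 block sizes = 13,122
# candidates.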
def _candidate_space() -> list[BlockRouterCandidate]:
space: list[BlockRouterCandidate] = []
positive_sets = (
("MOMENTUM_EXPANSION",),
("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
("CHOPPY_ROTATION", "MOMENTUM_EXPANSION"),
)
for positive_regimes in positive_sets:
for core_score_min in (0.50, 0.55, 0.60):
for breadth_persist_min in (0.45, 0.50, 0.55):
for funding_persist_min in (0.50, 0.55, 0.60):
for panic_max in (0.20, 0.30):
for choppy_max in (0.20, 0.30, 0.40):
for distribution_max in (0.20, 0.30, 0.40):
for current_cash_min in (0.50, 0.65, 0.80):
for block_bars in (42, 84, 180):
space.append(
BlockRouterCandidate(
positive_regimes=positive_regimes,
core_score_min=core_score_min,
breadth_persist_min=breadth_persist_min,
funding_persist_min=funding_persist_min,
panic_max=panic_max,
choppy_max=choppy_max,
distribution_max=distribution_max,
current_cash_min=current_cash_min,
block_bars=block_bars,
)
)
return space
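# Build both configs over the same five-year window, merge them bar-for-bar,
# score every candidate, then re-evaluate the winner and write the payload.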
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
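    # 1825 days = five years ending at the latest cached bar; both configs are
    # rebuilt over the same window so their per-bar returns line up.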
print("[phase] build current", flush=True)
current = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
print("[phase] build relaxed", flush=True)
relaxed = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
)
current_detail = _build_strategy_detail(current).rename(
columns={
"core_cash_pct": "current_cash_pct",
"portfolio_return": "current_return",
}
)
relaxed_detail = _build_strategy_detail(relaxed).rename(
columns={
"core_cash_pct": "relaxed_cash_pct",
"portfolio_return": "relaxed_return",
}
)
detail = current_detail.merge(
relaxed_detail[["timestamp", "relaxed_cash_pct", "relaxed_return"]],
on="timestamp",
how="inner",
)
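    # Inner join keeps only bars present in both replays, so every row carries
    # both candidates' cash level and return for the same timestamp.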
rows: list[dict[str, object]] = []
candidates = _candidate_space()
print(f"[phase] search {len(candidates)} block-router candidates", flush=True)
for idx, candidate in enumerate(candidates, start=1):
returns = _compose_block_returns(detail, candidate)
curve = _curve_from_returns(returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
rows.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
)
if idx % 96 == 0 or idx == len(candidates):
print(f"[search] {idx}/{len(candidates)}", flush=True)
rows.sort(key=lambda row: float(row["score"]), reverse=True)
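    # Re-run the top-ranked candidate end-to-end so the saved payload reflects
    # one clean pass rather than values captured during the search loop.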
best = BlockRouterCandidate(**rows[0]["candidate"])
best_returns = _compose_block_returns(detail, best)
best_curve = _curve_from_returns(best_returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(best_curve, latest_bar)
payload = {
"analysis": "current_relaxed_block_router",
"latest_bar": str(latest_bar),
"candidate": asdict(best),
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
"baselines": _baseline_summary(),
"search_top": rows[:10],
}
OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
print(json.dumps(payload, indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()